]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-3.2.9-201203051840.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-3.2.9-201203051840.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index dfa6fc6..0095943 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9 +*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13 @@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17 +*.gmo
18 *.grep
19 *.grp
20 *.gz
21 @@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25 +*.vim
26 *.xml
27 *.xz
28 *_MODULES
29 +*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33 @@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37 +PERF*
38 SCCS
39 System.map*
40 TAGS
41 @@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45 +builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51 +clut_vga16.c
52 +common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59 +config.c
60 config.mak
61 config.mak.autogen
62 +config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66 @@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70 +exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74 @@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78 +gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90 @@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103 -linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107 @@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111 -media
112 mconf
113 +mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120 +mkpiggy
121 mkprep
122 mkregtable
123 mktables
124 @@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128 +regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132 @@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152 +vmlinux.bin.bz2
153 vmlinux.lds
154 +vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158 @@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zconf.lex.c
169 zoffset.h
170 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171 index 81c287f..d456d02 100644
172 --- a/Documentation/kernel-parameters.txt
173 +++ b/Documentation/kernel-parameters.txt
174 @@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179 + virtualization environments that don't cope well with the
180 + expand down segment used by UDEREF on X86-32 or the frequent
181 + page table updates on X86-64.
182 +
183 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184 +
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188 diff --git a/Makefile b/Makefile
189 index 5f1739b..1831396 100644
190 --- a/Makefile
191 +++ b/Makefile
192 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197 -HOSTCXXFLAGS = -O2
198 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208 -PHONY += scripts_basic
209 -scripts_basic:
210 +PHONY += scripts_basic gcc-plugins
211 +scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215 @@ -564,6 +565,48 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219 +ifndef DISABLE_PAX_PLUGINS
220 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223 +endif
224 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
225 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227 +endif
228 +ifdef CONFIG_KALLOCSTAT_PLUGIN
229 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230 +endif
231 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
234 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
235 +endif
236 +ifdef CONFIG_CHECKER_PLUGIN
237 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
238 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
239 +endif
240 +endif
241 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS) $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS)
242 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
243 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
244 +ifeq ($(KBUILD_EXTMOD),)
245 +gcc-plugins:
246 + $(Q)$(MAKE) $(build)=tools/gcc
247 +else
248 +gcc-plugins: ;
249 +endif
250 +else
251 +gcc-plugins:
252 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
253 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
254 +else
255 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
256 +endif
257 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
258 +endif
259 +endif
260 +
261 include $(srctree)/arch/$(SRCARCH)/Makefile
262
263 ifneq ($(CONFIG_FRAME_WARN),0)
264 @@ -708,7 +751,7 @@ export mod_strip_cmd
265
266
267 ifeq ($(KBUILD_EXTMOD),)
268 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
269 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
270
271 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
272 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
273 @@ -932,6 +975,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
274
275 # The actual objects are generated when descending,
276 # make sure no implicit rule kicks in
277 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
278 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
279 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
280
281 # Handle descending into subdirectories listed in $(vmlinux-dirs)
282 @@ -941,7 +986,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
283 # Error messages still appears in the original language
284
285 PHONY += $(vmlinux-dirs)
286 -$(vmlinux-dirs): prepare scripts
287 +$(vmlinux-dirs): gcc-plugins prepare scripts
288 $(Q)$(MAKE) $(build)=$@
289
290 # Store (new) KERNELRELASE string in include/config/kernel.release
291 @@ -985,6 +1030,7 @@ prepare0: archprepare FORCE
292 $(Q)$(MAKE) $(build)=.
293
294 # All the preparing..
295 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
296 prepare: prepare0
297
298 # Generate some files
299 @@ -1086,6 +1132,8 @@ all: modules
300 # using awk while concatenating to the final file.
301
302 PHONY += modules
303 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
304 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
305 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
306 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
307 @$(kecho) ' Building modules, stage 2.';
308 @@ -1101,7 +1149,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
309
310 # Target to prepare building external modules
311 PHONY += modules_prepare
312 -modules_prepare: prepare scripts
313 +modules_prepare: gcc-plugins prepare scripts
314
315 # Target to install modules
316 PHONY += modules_install
317 @@ -1198,6 +1246,7 @@ distclean: mrproper
318 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
319 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
320 -o -name '.*.rej' \
321 + -o -name '.*.rej' -o -name '*.so' \
322 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
323 -type f -print | xargs rm -f
324
325 @@ -1358,6 +1407,8 @@ PHONY += $(module-dirs) modules
326 $(module-dirs): crmodverdir $(objtree)/Module.symvers
327 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
328
329 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
330 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
331 modules: $(module-dirs)
332 @$(kecho) ' Building modules, stage 2.';
333 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
334 @@ -1484,17 +1535,21 @@ else
335 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
336 endif
337
338 -%.s: %.c prepare scripts FORCE
339 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
340 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
341 +%.s: %.c gcc-plugins prepare scripts FORCE
342 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
343 %.i: %.c prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345 -%.o: %.c prepare scripts FORCE
346 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
347 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
348 +%.o: %.c gcc-plugins prepare scripts FORCE
349 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
350 %.lst: %.c prepare scripts FORCE
351 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
352 -%.s: %.S prepare scripts FORCE
353 +%.s: %.S gcc-plugins prepare scripts FORCE
354 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
355 -%.o: %.S prepare scripts FORCE
356 +%.o: %.S gcc-plugins prepare scripts FORCE
357 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
358 %.symtypes: %.c prepare scripts FORCE
359 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
360 @@ -1504,11 +1559,15 @@ endif
361 $(cmd_crmodverdir)
362 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
363 $(build)=$(build-dir)
364 -%/: prepare scripts FORCE
365 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
366 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
367 +%/: gcc-plugins prepare scripts FORCE
368 $(cmd_crmodverdir)
369 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
370 $(build)=$(build-dir)
371 -%.ko: prepare scripts FORCE
372 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
373 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
374 +%.ko: gcc-plugins prepare scripts FORCE
375 $(cmd_crmodverdir)
376 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
377 $(build)=$(build-dir) $(@:.ko=.o)
378 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
379 index 640f909..48b6597 100644
380 --- a/arch/alpha/include/asm/atomic.h
381 +++ b/arch/alpha/include/asm/atomic.h
382 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
383 #define atomic_dec(v) atomic_sub(1,(v))
384 #define atomic64_dec(v) atomic64_sub(1,(v))
385
386 +#define atomic64_read_unchecked(v) atomic64_read(v)
387 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
388 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
389 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
390 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
391 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
392 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
393 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
394 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
395 +
396 #define smp_mb__before_atomic_dec() smp_mb()
397 #define smp_mb__after_atomic_dec() smp_mb()
398 #define smp_mb__before_atomic_inc() smp_mb()
399 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
400 index da5449e..7418343 100644
401 --- a/arch/alpha/include/asm/elf.h
402 +++ b/arch/alpha/include/asm/elf.h
403 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
404
405 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
406
407 +#ifdef CONFIG_PAX_ASLR
408 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
409 +
410 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
411 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
412 +#endif
413 +
414 /* $0 is set by ld.so to a pointer to a function which might be
415 registered using atexit. This provides a mean for the dynamic
416 linker to call DT_FINI functions for shared libraries that have
417 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
418 index de98a73..bd4f1f8 100644
419 --- a/arch/alpha/include/asm/pgtable.h
420 +++ b/arch/alpha/include/asm/pgtable.h
421 @@ -101,6 +101,17 @@ struct vm_area_struct;
422 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
423 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
424 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
425 +
426 +#ifdef CONFIG_PAX_PAGEEXEC
427 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
428 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
429 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
430 +#else
431 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
432 +# define PAGE_COPY_NOEXEC PAGE_COPY
433 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
434 +#endif
435 +
436 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
437
438 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
439 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
440 index 2fd00b7..cfd5069 100644
441 --- a/arch/alpha/kernel/module.c
442 +++ b/arch/alpha/kernel/module.c
443 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
444
445 /* The small sections were sorted to the end of the segment.
446 The following should definitely cover them. */
447 - gp = (u64)me->module_core + me->core_size - 0x8000;
448 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
449 got = sechdrs[me->arch.gotsecindex].sh_addr;
450
451 for (i = 0; i < n; i++) {
452 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
453 index 01e8715..be0e80f 100644
454 --- a/arch/alpha/kernel/osf_sys.c
455 +++ b/arch/alpha/kernel/osf_sys.c
456 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
457 /* At this point: (!vma || addr < vma->vm_end). */
458 if (limit - len < addr)
459 return -ENOMEM;
460 - if (!vma || addr + len <= vma->vm_start)
461 + if (check_heap_stack_gap(vma, addr, len))
462 return addr;
463 addr = vma->vm_end;
464 vma = vma->vm_next;
465 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
466 merely specific addresses, but regions of memory -- perhaps
467 this feature should be incorporated into all ports? */
468
469 +#ifdef CONFIG_PAX_RANDMMAP
470 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
471 +#endif
472 +
473 if (addr) {
474 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
475 if (addr != (unsigned long) -ENOMEM)
476 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
477 }
478
479 /* Next, try allocating at TASK_UNMAPPED_BASE. */
480 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
481 - len, limit);
482 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
483 +
484 if (addr != (unsigned long) -ENOMEM)
485 return addr;
486
487 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
488 index fadd5f8..904e73a 100644
489 --- a/arch/alpha/mm/fault.c
490 +++ b/arch/alpha/mm/fault.c
491 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
492 __reload_thread(pcb);
493 }
494
495 +#ifdef CONFIG_PAX_PAGEEXEC
496 +/*
497 + * PaX: decide what to do with offenders (regs->pc = fault address)
498 + *
499 + * returns 1 when task should be killed
500 + * 2 when patched PLT trampoline was detected
501 + * 3 when unpatched PLT trampoline was detected
502 + */
503 +static int pax_handle_fetch_fault(struct pt_regs *regs)
504 +{
505 +
506 +#ifdef CONFIG_PAX_EMUPLT
507 + int err;
508 +
509 + do { /* PaX: patched PLT emulation #1 */
510 + unsigned int ldah, ldq, jmp;
511 +
512 + err = get_user(ldah, (unsigned int *)regs->pc);
513 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
514 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
515 +
516 + if (err)
517 + break;
518 +
519 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
520 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
521 + jmp == 0x6BFB0000U)
522 + {
523 + unsigned long r27, addr;
524 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
525 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
526 +
527 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
528 + err = get_user(r27, (unsigned long *)addr);
529 + if (err)
530 + break;
531 +
532 + regs->r27 = r27;
533 + regs->pc = r27;
534 + return 2;
535 + }
536 + } while (0);
537 +
538 + do { /* PaX: patched PLT emulation #2 */
539 + unsigned int ldah, lda, br;
540 +
541 + err = get_user(ldah, (unsigned int *)regs->pc);
542 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
543 + err |= get_user(br, (unsigned int *)(regs->pc+8));
544 +
545 + if (err)
546 + break;
547 +
548 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
549 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
550 + (br & 0xFFE00000U) == 0xC3E00000U)
551 + {
552 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
553 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
554 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
555 +
556 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
557 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
558 + return 2;
559 + }
560 + } while (0);
561 +
562 + do { /* PaX: unpatched PLT emulation */
563 + unsigned int br;
564 +
565 + err = get_user(br, (unsigned int *)regs->pc);
566 +
567 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
568 + unsigned int br2, ldq, nop, jmp;
569 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
570 +
571 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
572 + err = get_user(br2, (unsigned int *)addr);
573 + err |= get_user(ldq, (unsigned int *)(addr+4));
574 + err |= get_user(nop, (unsigned int *)(addr+8));
575 + err |= get_user(jmp, (unsigned int *)(addr+12));
576 + err |= get_user(resolver, (unsigned long *)(addr+16));
577 +
578 + if (err)
579 + break;
580 +
581 + if (br2 == 0xC3600000U &&
582 + ldq == 0xA77B000CU &&
583 + nop == 0x47FF041FU &&
584 + jmp == 0x6B7B0000U)
585 + {
586 + regs->r28 = regs->pc+4;
587 + regs->r27 = addr+16;
588 + regs->pc = resolver;
589 + return 3;
590 + }
591 + }
592 + } while (0);
593 +#endif
594 +
595 + return 1;
596 +}
597 +
598 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
599 +{
600 + unsigned long i;
601 +
602 + printk(KERN_ERR "PAX: bytes at PC: ");
603 + for (i = 0; i < 5; i++) {
604 + unsigned int c;
605 + if (get_user(c, (unsigned int *)pc+i))
606 + printk(KERN_CONT "???????? ");
607 + else
608 + printk(KERN_CONT "%08x ", c);
609 + }
610 + printk("\n");
611 +}
612 +#endif
613
614 /*
615 * This routine handles page faults. It determines the address,
616 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
617 good_area:
618 si_code = SEGV_ACCERR;
619 if (cause < 0) {
620 - if (!(vma->vm_flags & VM_EXEC))
621 + if (!(vma->vm_flags & VM_EXEC)) {
622 +
623 +#ifdef CONFIG_PAX_PAGEEXEC
624 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
625 + goto bad_area;
626 +
627 + up_read(&mm->mmap_sem);
628 + switch (pax_handle_fetch_fault(regs)) {
629 +
630 +#ifdef CONFIG_PAX_EMUPLT
631 + case 2:
632 + case 3:
633 + return;
634 +#endif
635 +
636 + }
637 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
638 + do_group_exit(SIGKILL);
639 +#else
640 goto bad_area;
641 +#endif
642 +
643 + }
644 } else if (!cause) {
645 /* Allow reads even for write-only mappings */
646 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
647 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
648 index 86976d0..6610950 100644
649 --- a/arch/arm/include/asm/atomic.h
650 +++ b/arch/arm/include/asm/atomic.h
651 @@ -15,6 +15,10 @@
652 #include <linux/types.h>
653 #include <asm/system.h>
654
655 +#ifdef CONFIG_GENERIC_ATOMIC64
656 +#include <asm-generic/atomic64.h>
657 +#endif
658 +
659 #define ATOMIC_INIT(i) { (i) }
660
661 #ifdef __KERNEL__
662 @@ -239,6 +243,14 @@ typedef struct {
663 u64 __aligned(8) counter;
664 } atomic64_t;
665
666 +#ifdef CONFIG_PAX_REFCOUNT
667 +typedef struct {
668 + u64 __aligned(8) counter;
669 +} atomic64_unchecked_t;
670 +#else
671 +typedef atomic64_t atomic64_unchecked_t;
672 +#endif
673 +
674 #define ATOMIC64_INIT(i) { (i) }
675
676 static inline u64 atomic64_read(atomic64_t *v)
677 @@ -459,6 +471,16 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
678 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
679 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
680
681 +#define atomic64_read_unchecked(v) atomic64_read(v)
682 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
683 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
684 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
685 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
686 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
687 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
688 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
689 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
690 +
691 #endif /* !CONFIG_GENERIC_ATOMIC64 */
692 #endif
693 #endif
694 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
695 index 0e9ce8d..6ef1e03 100644
696 --- a/arch/arm/include/asm/elf.h
697 +++ b/arch/arm/include/asm/elf.h
698 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
699 the loader. We need to make sure that it is out of the way of the program
700 that it will "exec", and that there is sufficient room for the brk. */
701
702 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
703 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
704 +
705 +#ifdef CONFIG_PAX_ASLR
706 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
707 +
708 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
709 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
710 +#endif
711
712 /* When the program starts, a1 contains a pointer to a function to be
713 registered with atexit, as per the SVR4 ABI. A value of 0 means we
714 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
715 extern void elf_set_personality(const struct elf32_hdr *);
716 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
717
718 -struct mm_struct;
719 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
720 -#define arch_randomize_brk arch_randomize_brk
721 -
722 extern int vectors_user_mapping(void);
723 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
724 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
725 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
726 index e51b1e8..32a3113 100644
727 --- a/arch/arm/include/asm/kmap_types.h
728 +++ b/arch/arm/include/asm/kmap_types.h
729 @@ -21,6 +21,7 @@ enum km_type {
730 KM_L1_CACHE,
731 KM_L2_CACHE,
732 KM_KDB,
733 + KM_CLEARPAGE,
734 KM_TYPE_NR
735 };
736
737 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
738 index b293616..96310e5 100644
739 --- a/arch/arm/include/asm/uaccess.h
740 +++ b/arch/arm/include/asm/uaccess.h
741 @@ -22,6 +22,8 @@
742 #define VERIFY_READ 0
743 #define VERIFY_WRITE 1
744
745 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
746 +
747 /*
748 * The exception table consists of pairs of addresses: the first is the
749 * address of an instruction that is allowed to fault, and the second is
750 @@ -387,8 +389,23 @@ do { \
751
752
753 #ifdef CONFIG_MMU
754 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
755 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
756 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
757 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
758 +
759 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
760 +{
761 + if (!__builtin_constant_p(n))
762 + check_object_size(to, n, false);
763 + return ___copy_from_user(to, from, n);
764 +}
765 +
766 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
767 +{
768 + if (!__builtin_constant_p(n))
769 + check_object_size(from, n, true);
770 + return ___copy_to_user(to, from, n);
771 +}
772 +
773 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
774 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
775 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
776 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
777
778 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
779 {
780 + if ((long)n < 0)
781 + return n;
782 +
783 if (access_ok(VERIFY_READ, from, n))
784 n = __copy_from_user(to, from, n);
785 else /* security hole - plug it */
786 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
787
788 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
789 {
790 + if ((long)n < 0)
791 + return n;
792 +
793 if (access_ok(VERIFY_WRITE, to, n))
794 n = __copy_to_user(to, from, n);
795 return n;
796 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
797 index 5b0bce6..becd81c 100644
798 --- a/arch/arm/kernel/armksyms.c
799 +++ b/arch/arm/kernel/armksyms.c
800 @@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
801 #ifdef CONFIG_MMU
802 EXPORT_SYMBOL(copy_page);
803
804 -EXPORT_SYMBOL(__copy_from_user);
805 -EXPORT_SYMBOL(__copy_to_user);
806 +EXPORT_SYMBOL(___copy_from_user);
807 +EXPORT_SYMBOL(___copy_to_user);
808 EXPORT_SYMBOL(__clear_user);
809
810 EXPORT_SYMBOL(__get_user_1);
811 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
812 index 3d0c6fb..3dcae52 100644
813 --- a/arch/arm/kernel/process.c
814 +++ b/arch/arm/kernel/process.c
815 @@ -28,7 +28,6 @@
816 #include <linux/tick.h>
817 #include <linux/utsname.h>
818 #include <linux/uaccess.h>
819 -#include <linux/random.h>
820 #include <linux/hw_breakpoint.h>
821 #include <linux/cpuidle.h>
822
823 @@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
824 return 0;
825 }
826
827 -unsigned long arch_randomize_brk(struct mm_struct *mm)
828 -{
829 - unsigned long range_end = mm->brk + 0x02000000;
830 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
831 -}
832 -
833 #ifdef CONFIG_MMU
834 /*
835 * The vectors page is always readable from user space for the
836 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
837 index 99a5727..a3d5bb1 100644
838 --- a/arch/arm/kernel/traps.c
839 +++ b/arch/arm/kernel/traps.c
840 @@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
841
842 static DEFINE_RAW_SPINLOCK(die_lock);
843
844 +extern void gr_handle_kernel_exploit(void);
845 +
846 /*
847 * This function is protected against re-entrancy.
848 */
849 @@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
850 panic("Fatal exception in interrupt");
851 if (panic_on_oops)
852 panic("Fatal exception");
853 +
854 + gr_handle_kernel_exploit();
855 +
856 if (ret != NOTIFY_STOP)
857 do_exit(SIGSEGV);
858 }
859 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
860 index 66a477a..bee61d3 100644
861 --- a/arch/arm/lib/copy_from_user.S
862 +++ b/arch/arm/lib/copy_from_user.S
863 @@ -16,7 +16,7 @@
864 /*
865 * Prototype:
866 *
867 - * size_t __copy_from_user(void *to, const void *from, size_t n)
868 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
869 *
870 * Purpose:
871 *
872 @@ -84,11 +84,11 @@
873
874 .text
875
876 -ENTRY(__copy_from_user)
877 +ENTRY(___copy_from_user)
878
879 #include "copy_template.S"
880
881 -ENDPROC(__copy_from_user)
882 +ENDPROC(___copy_from_user)
883
884 .pushsection .fixup,"ax"
885 .align 0
886 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
887 index d066df6..df28194 100644
888 --- a/arch/arm/lib/copy_to_user.S
889 +++ b/arch/arm/lib/copy_to_user.S
890 @@ -16,7 +16,7 @@
891 /*
892 * Prototype:
893 *
894 - * size_t __copy_to_user(void *to, const void *from, size_t n)
895 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
896 *
897 * Purpose:
898 *
899 @@ -88,11 +88,11 @@
900 .text
901
902 ENTRY(__copy_to_user_std)
903 -WEAK(__copy_to_user)
904 +WEAK(___copy_to_user)
905
906 #include "copy_template.S"
907
908 -ENDPROC(__copy_to_user)
909 +ENDPROC(___copy_to_user)
910 ENDPROC(__copy_to_user_std)
911
912 .pushsection .fixup,"ax"
913 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
914 index d0ece2a..5ae2f39 100644
915 --- a/arch/arm/lib/uaccess.S
916 +++ b/arch/arm/lib/uaccess.S
917 @@ -20,7 +20,7 @@
918
919 #define PAGE_SHIFT 12
920
921 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
922 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
923 * Purpose : copy a block to user memory from kernel memory
924 * Params : to - user memory
925 * : from - kernel memory
926 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
927 sub r2, r2, ip
928 b .Lc2u_dest_aligned
929
930 -ENTRY(__copy_to_user)
931 +ENTRY(___copy_to_user)
932 stmfd sp!, {r2, r4 - r7, lr}
933 cmp r2, #4
934 blt .Lc2u_not_enough
935 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
936 ldrgtb r3, [r1], #0
937 USER( T(strgtb) r3, [r0], #1) @ May fault
938 b .Lc2u_finished
939 -ENDPROC(__copy_to_user)
940 +ENDPROC(___copy_to_user)
941
942 .pushsection .fixup,"ax"
943 .align 0
944 9001: ldmfd sp!, {r0, r4 - r7, pc}
945 .popsection
946
947 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
948 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
949 * Purpose : copy a block from user memory to kernel memory
950 * Params : to - kernel memory
951 * : from - user memory
952 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
953 sub r2, r2, ip
954 b .Lcfu_dest_aligned
955
956 -ENTRY(__copy_from_user)
957 +ENTRY(___copy_from_user)
958 stmfd sp!, {r0, r2, r4 - r7, lr}
959 cmp r2, #4
960 blt .Lcfu_not_enough
961 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
962 USER( T(ldrgtb) r3, [r1], #1) @ May fault
963 strgtb r3, [r0], #1
964 b .Lcfu_finished
965 -ENDPROC(__copy_from_user)
966 +ENDPROC(___copy_from_user)
967
968 .pushsection .fixup,"ax"
969 .align 0
970 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
971 index 025f742..8432b08 100644
972 --- a/arch/arm/lib/uaccess_with_memcpy.c
973 +++ b/arch/arm/lib/uaccess_with_memcpy.c
974 @@ -104,7 +104,7 @@ out:
975 }
976
977 unsigned long
978 -__copy_to_user(void __user *to, const void *from, unsigned long n)
979 +___copy_to_user(void __user *to, const void *from, unsigned long n)
980 {
981 /*
982 * This test is stubbed out of the main function above to keep
983 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
984 index 2b2d51c..0127490 100644
985 --- a/arch/arm/mach-ux500/mbox-db5500.c
986 +++ b/arch/arm/mach-ux500/mbox-db5500.c
987 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
988 return sprintf(buf, "0x%X\n", mbox_value);
989 }
990
991 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
992 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
993
994 static int mbox_show(struct seq_file *s, void *data)
995 {
996 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
997 index aa33949..b242a2f 100644
998 --- a/arch/arm/mm/fault.c
999 +++ b/arch/arm/mm/fault.c
1000 @@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1001 }
1002 #endif
1003
1004 +#ifdef CONFIG_PAX_PAGEEXEC
1005 + if (fsr & FSR_LNX_PF) {
1006 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1007 + do_group_exit(SIGKILL);
1008 + }
1009 +#endif
1010 +
1011 tsk->thread.address = addr;
1012 tsk->thread.error_code = fsr;
1013 tsk->thread.trap_no = 14;
1014 @@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1015 }
1016 #endif /* CONFIG_MMU */
1017
1018 +#ifdef CONFIG_PAX_PAGEEXEC
1019 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1020 +{
1021 + long i;
1022 +
1023 + printk(KERN_ERR "PAX: bytes at PC: ");
1024 + for (i = 0; i < 20; i++) {
1025 + unsigned char c;
1026 + if (get_user(c, (__force unsigned char __user *)pc+i))
1027 + printk(KERN_CONT "?? ");
1028 + else
1029 + printk(KERN_CONT "%02x ", c);
1030 + }
1031 + printk("\n");
1032 +
1033 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1034 + for (i = -1; i < 20; i++) {
1035 + unsigned long c;
1036 + if (get_user(c, (__force unsigned long __user *)sp+i))
1037 + printk(KERN_CONT "???????? ");
1038 + else
1039 + printk(KERN_CONT "%08lx ", c);
1040 + }
1041 + printk("\n");
1042 +}
1043 +#endif
1044 +
1045 /*
1046 * First Level Translation Fault Handler
1047 *
1048 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1049 index 44b628e..623ee2a 100644
1050 --- a/arch/arm/mm/mmap.c
1051 +++ b/arch/arm/mm/mmap.c
1052 @@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1053 if (len > TASK_SIZE)
1054 return -ENOMEM;
1055
1056 +#ifdef CONFIG_PAX_RANDMMAP
1057 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1058 +#endif
1059 +
1060 if (addr) {
1061 if (do_align)
1062 addr = COLOUR_ALIGN(addr, pgoff);
1063 @@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1064 addr = PAGE_ALIGN(addr);
1065
1066 vma = find_vma(mm, addr);
1067 - if (TASK_SIZE - len >= addr &&
1068 - (!vma || addr + len <= vma->vm_start))
1069 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1070 return addr;
1071 }
1072 if (len > mm->cached_hole_size) {
1073 - start_addr = addr = mm->free_area_cache;
1074 + start_addr = addr = mm->free_area_cache;
1075 } else {
1076 - start_addr = addr = TASK_UNMAPPED_BASE;
1077 - mm->cached_hole_size = 0;
1078 + start_addr = addr = mm->mmap_base;
1079 + mm->cached_hole_size = 0;
1080 }
1081 /* 8 bits of randomness in 20 address space bits */
1082 if ((current->flags & PF_RANDOMIZE) &&
1083 @@ -89,14 +92,14 @@ full_search:
1084 * Start a new search - just in case we missed
1085 * some holes.
1086 */
1087 - if (start_addr != TASK_UNMAPPED_BASE) {
1088 - start_addr = addr = TASK_UNMAPPED_BASE;
1089 + if (start_addr != mm->mmap_base) {
1090 + start_addr = addr = mm->mmap_base;
1091 mm->cached_hole_size = 0;
1092 goto full_search;
1093 }
1094 return -ENOMEM;
1095 }
1096 - if (!vma || addr + len <= vma->vm_start) {
1097 + if (check_heap_stack_gap(vma, addr, len)) {
1098 /*
1099 * Remember the place where we stopped the search:
1100 */
1101 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1102 index 3b3159b..425ea94 100644
1103 --- a/arch/avr32/include/asm/elf.h
1104 +++ b/arch/avr32/include/asm/elf.h
1105 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1106 the loader. We need to make sure that it is out of the way of the program
1107 that it will "exec", and that there is sufficient room for the brk. */
1108
1109 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1110 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1111
1112 +#ifdef CONFIG_PAX_ASLR
1113 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1114 +
1115 +#define PAX_DELTA_MMAP_LEN 15
1116 +#define PAX_DELTA_STACK_LEN 15
1117 +#endif
1118
1119 /* This yields a mask that user programs can use to figure out what
1120 instruction set this CPU supports. This could be done in user space,
1121 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1122 index b7f5c68..556135c 100644
1123 --- a/arch/avr32/include/asm/kmap_types.h
1124 +++ b/arch/avr32/include/asm/kmap_types.h
1125 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1126 D(11) KM_IRQ1,
1127 D(12) KM_SOFTIRQ0,
1128 D(13) KM_SOFTIRQ1,
1129 -D(14) KM_TYPE_NR
1130 +D(14) KM_CLEARPAGE,
1131 +D(15) KM_TYPE_NR
1132 };
1133
1134 #undef D
1135 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1136 index f7040a1..db9f300 100644
1137 --- a/arch/avr32/mm/fault.c
1138 +++ b/arch/avr32/mm/fault.c
1139 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1140
1141 int exception_trace = 1;
1142
1143 +#ifdef CONFIG_PAX_PAGEEXEC
1144 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1145 +{
1146 + unsigned long i;
1147 +
1148 + printk(KERN_ERR "PAX: bytes at PC: ");
1149 + for (i = 0; i < 20; i++) {
1150 + unsigned char c;
1151 + if (get_user(c, (unsigned char *)pc+i))
1152 + printk(KERN_CONT "???????? ");
1153 + else
1154 + printk(KERN_CONT "%02x ", c);
1155 + }
1156 + printk("\n");
1157 +}
1158 +#endif
1159 +
1160 /*
1161 * This routine handles page faults. It determines the address and the
1162 * problem, and then passes it off to one of the appropriate routines.
1163 @@ -156,6 +173,16 @@ bad_area:
1164 up_read(&mm->mmap_sem);
1165
1166 if (user_mode(regs)) {
1167 +
1168 +#ifdef CONFIG_PAX_PAGEEXEC
1169 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1170 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1171 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1172 + do_group_exit(SIGKILL);
1173 + }
1174 + }
1175 +#endif
1176 +
1177 if (exception_trace && printk_ratelimit())
1178 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1179 "sp %08lx ecr %lu\n",
1180 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1181 index 0d8a7d6..d0c9ff5 100644
1182 --- a/arch/frv/include/asm/atomic.h
1183 +++ b/arch/frv/include/asm/atomic.h
1184 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1185 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1186 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1187
1188 +#define atomic64_read_unchecked(v) atomic64_read(v)
1189 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1190 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1191 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1192 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1193 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1194 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1195 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1196 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1197 +
1198 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
1199 {
1200 int c, old;
1201 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1202 index f8e16b2..c73ff79 100644
1203 --- a/arch/frv/include/asm/kmap_types.h
1204 +++ b/arch/frv/include/asm/kmap_types.h
1205 @@ -23,6 +23,7 @@ enum km_type {
1206 KM_IRQ1,
1207 KM_SOFTIRQ0,
1208 KM_SOFTIRQ1,
1209 + KM_CLEARPAGE,
1210 KM_TYPE_NR
1211 };
1212
1213 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1214 index 385fd30..6c3d97e 100644
1215 --- a/arch/frv/mm/elf-fdpic.c
1216 +++ b/arch/frv/mm/elf-fdpic.c
1217 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1218 if (addr) {
1219 addr = PAGE_ALIGN(addr);
1220 vma = find_vma(current->mm, addr);
1221 - if (TASK_SIZE - len >= addr &&
1222 - (!vma || addr + len <= vma->vm_start))
1223 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1224 goto success;
1225 }
1226
1227 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1228 for (; vma; vma = vma->vm_next) {
1229 if (addr > limit)
1230 break;
1231 - if (addr + len <= vma->vm_start)
1232 + if (check_heap_stack_gap(vma, addr, len))
1233 goto success;
1234 addr = vma->vm_end;
1235 }
1236 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1237 for (; vma; vma = vma->vm_next) {
1238 if (addr > limit)
1239 break;
1240 - if (addr + len <= vma->vm_start)
1241 + if (check_heap_stack_gap(vma, addr, len))
1242 goto success;
1243 addr = vma->vm_end;
1244 }
1245 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
1246 index 3fad89e..3047da5 100644
1247 --- a/arch/ia64/include/asm/atomic.h
1248 +++ b/arch/ia64/include/asm/atomic.h
1249 @@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
1250 #define atomic64_inc(v) atomic64_add(1, (v))
1251 #define atomic64_dec(v) atomic64_sub(1, (v))
1252
1253 +#define atomic64_read_unchecked(v) atomic64_read(v)
1254 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1255 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1256 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1257 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1258 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1259 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1260 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1261 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1262 +
1263 /* Atomic operations are already serializing */
1264 #define smp_mb__before_atomic_dec() barrier()
1265 #define smp_mb__after_atomic_dec() barrier()
1266 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1267 index b5298eb..67c6e62 100644
1268 --- a/arch/ia64/include/asm/elf.h
1269 +++ b/arch/ia64/include/asm/elf.h
1270 @@ -42,6 +42,13 @@
1271 */
1272 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1273
1274 +#ifdef CONFIG_PAX_ASLR
1275 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1276 +
1277 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1278 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1279 +#endif
1280 +
1281 #define PT_IA_64_UNWIND 0x70000001
1282
1283 /* IA-64 relocations: */
1284 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1285 index 1a97af3..7529d31 100644
1286 --- a/arch/ia64/include/asm/pgtable.h
1287 +++ b/arch/ia64/include/asm/pgtable.h
1288 @@ -12,7 +12,7 @@
1289 * David Mosberger-Tang <davidm@hpl.hp.com>
1290 */
1291
1292 -
1293 +#include <linux/const.h>
1294 #include <asm/mman.h>
1295 #include <asm/page.h>
1296 #include <asm/processor.h>
1297 @@ -143,6 +143,17 @@
1298 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1299 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1300 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1301 +
1302 +#ifdef CONFIG_PAX_PAGEEXEC
1303 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1304 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1305 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1306 +#else
1307 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1308 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1309 +# define PAGE_COPY_NOEXEC PAGE_COPY
1310 +#endif
1311 +
1312 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1313 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1314 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1315 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1316 index b77768d..e0795eb 100644
1317 --- a/arch/ia64/include/asm/spinlock.h
1318 +++ b/arch/ia64/include/asm/spinlock.h
1319 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1320 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1321
1322 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1323 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1324 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1325 }
1326
1327 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1328 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1329 index 449c8c0..432a3d2 100644
1330 --- a/arch/ia64/include/asm/uaccess.h
1331 +++ b/arch/ia64/include/asm/uaccess.h
1332 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1333 const void *__cu_from = (from); \
1334 long __cu_len = (n); \
1335 \
1336 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1337 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1338 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1339 __cu_len; \
1340 })
1341 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1342 long __cu_len = (n); \
1343 \
1344 __chk_user_ptr(__cu_from); \
1345 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1346 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1347 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1348 __cu_len; \
1349 })
1350 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1351 index 24603be..948052d 100644
1352 --- a/arch/ia64/kernel/module.c
1353 +++ b/arch/ia64/kernel/module.c
1354 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1355 void
1356 module_free (struct module *mod, void *module_region)
1357 {
1358 - if (mod && mod->arch.init_unw_table &&
1359 - module_region == mod->module_init) {
1360 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1361 unw_remove_unwind_table(mod->arch.init_unw_table);
1362 mod->arch.init_unw_table = NULL;
1363 }
1364 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1365 }
1366
1367 static inline int
1368 +in_init_rx (const struct module *mod, uint64_t addr)
1369 +{
1370 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1371 +}
1372 +
1373 +static inline int
1374 +in_init_rw (const struct module *mod, uint64_t addr)
1375 +{
1376 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1377 +}
1378 +
1379 +static inline int
1380 in_init (const struct module *mod, uint64_t addr)
1381 {
1382 - return addr - (uint64_t) mod->module_init < mod->init_size;
1383 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1384 +}
1385 +
1386 +static inline int
1387 +in_core_rx (const struct module *mod, uint64_t addr)
1388 +{
1389 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1390 +}
1391 +
1392 +static inline int
1393 +in_core_rw (const struct module *mod, uint64_t addr)
1394 +{
1395 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1396 }
1397
1398 static inline int
1399 in_core (const struct module *mod, uint64_t addr)
1400 {
1401 - return addr - (uint64_t) mod->module_core < mod->core_size;
1402 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1403 }
1404
1405 static inline int
1406 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1407 break;
1408
1409 case RV_BDREL:
1410 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1411 + if (in_init_rx(mod, val))
1412 + val -= (uint64_t) mod->module_init_rx;
1413 + else if (in_init_rw(mod, val))
1414 + val -= (uint64_t) mod->module_init_rw;
1415 + else if (in_core_rx(mod, val))
1416 + val -= (uint64_t) mod->module_core_rx;
1417 + else if (in_core_rw(mod, val))
1418 + val -= (uint64_t) mod->module_core_rw;
1419 break;
1420
1421 case RV_LTV:
1422 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1423 * addresses have been selected...
1424 */
1425 uint64_t gp;
1426 - if (mod->core_size > MAX_LTOFF)
1427 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1428 /*
1429 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1430 * at the end of the module.
1431 */
1432 - gp = mod->core_size - MAX_LTOFF / 2;
1433 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1434 else
1435 - gp = mod->core_size / 2;
1436 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1437 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1438 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1439 mod->arch.gp = gp;
1440 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1441 }
1442 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1443 index 609d500..7dde2a8 100644
1444 --- a/arch/ia64/kernel/sys_ia64.c
1445 +++ b/arch/ia64/kernel/sys_ia64.c
1446 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1447 if (REGION_NUMBER(addr) == RGN_HPAGE)
1448 addr = 0;
1449 #endif
1450 +
1451 +#ifdef CONFIG_PAX_RANDMMAP
1452 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1453 + addr = mm->free_area_cache;
1454 + else
1455 +#endif
1456 +
1457 if (!addr)
1458 addr = mm->free_area_cache;
1459
1460 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1461 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1462 /* At this point: (!vma || addr < vma->vm_end). */
1463 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1464 - if (start_addr != TASK_UNMAPPED_BASE) {
1465 + if (start_addr != mm->mmap_base) {
1466 /* Start a new search --- just in case we missed some holes. */
1467 - addr = TASK_UNMAPPED_BASE;
1468 + addr = mm->mmap_base;
1469 goto full_search;
1470 }
1471 return -ENOMEM;
1472 }
1473 - if (!vma || addr + len <= vma->vm_start) {
1474 + if (check_heap_stack_gap(vma, addr, len)) {
1475 /* Remember the address where we stopped this search: */
1476 mm->free_area_cache = addr + len;
1477 return addr;
1478 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1479 index 53c0ba0..2accdde 100644
1480 --- a/arch/ia64/kernel/vmlinux.lds.S
1481 +++ b/arch/ia64/kernel/vmlinux.lds.S
1482 @@ -199,7 +199,7 @@ SECTIONS {
1483 /* Per-cpu data: */
1484 . = ALIGN(PERCPU_PAGE_SIZE);
1485 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1486 - __phys_per_cpu_start = __per_cpu_load;
1487 + __phys_per_cpu_start = per_cpu_load;
1488 /*
1489 * ensure percpu data fits
1490 * into percpu page size
1491 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1492 index 20b3593..1ce77f0 100644
1493 --- a/arch/ia64/mm/fault.c
1494 +++ b/arch/ia64/mm/fault.c
1495 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1496 return pte_present(pte);
1497 }
1498
1499 +#ifdef CONFIG_PAX_PAGEEXEC
1500 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1501 +{
1502 + unsigned long i;
1503 +
1504 + printk(KERN_ERR "PAX: bytes at PC: ");
1505 + for (i = 0; i < 8; i++) {
1506 + unsigned int c;
1507 + if (get_user(c, (unsigned int *)pc+i))
1508 + printk(KERN_CONT "???????? ");
1509 + else
1510 + printk(KERN_CONT "%08x ", c);
1511 + }
1512 + printk("\n");
1513 +}
1514 +#endif
1515 +
1516 void __kprobes
1517 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1518 {
1519 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1520 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1521 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1522
1523 - if ((vma->vm_flags & mask) != mask)
1524 + if ((vma->vm_flags & mask) != mask) {
1525 +
1526 +#ifdef CONFIG_PAX_PAGEEXEC
1527 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1528 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1529 + goto bad_area;
1530 +
1531 + up_read(&mm->mmap_sem);
1532 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1533 + do_group_exit(SIGKILL);
1534 + }
1535 +#endif
1536 +
1537 goto bad_area;
1538
1539 + }
1540 +
1541 /*
1542 * If for any reason at all we couldn't handle the fault, make
1543 * sure we exit gracefully rather than endlessly redo the
1544 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1545 index 5ca674b..e0e1b70 100644
1546 --- a/arch/ia64/mm/hugetlbpage.c
1547 +++ b/arch/ia64/mm/hugetlbpage.c
1548 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1549 /* At this point: (!vmm || addr < vmm->vm_end). */
1550 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1551 return -ENOMEM;
1552 - if (!vmm || (addr + len) <= vmm->vm_start)
1553 + if (check_heap_stack_gap(vmm, addr, len))
1554 return addr;
1555 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1556 }
1557 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1558 index 00cb0e2..2ad8024 100644
1559 --- a/arch/ia64/mm/init.c
1560 +++ b/arch/ia64/mm/init.c
1561 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1562 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1563 vma->vm_end = vma->vm_start + PAGE_SIZE;
1564 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1565 +
1566 +#ifdef CONFIG_PAX_PAGEEXEC
1567 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1568 + vma->vm_flags &= ~VM_EXEC;
1569 +
1570 +#ifdef CONFIG_PAX_MPROTECT
1571 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1572 + vma->vm_flags &= ~VM_MAYEXEC;
1573 +#endif
1574 +
1575 + }
1576 +#endif
1577 +
1578 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1579 down_write(&current->mm->mmap_sem);
1580 if (insert_vm_struct(current->mm, vma)) {
1581 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1582 index 82abd15..d95ae5d 100644
1583 --- a/arch/m32r/lib/usercopy.c
1584 +++ b/arch/m32r/lib/usercopy.c
1585 @@ -14,6 +14,9 @@
1586 unsigned long
1587 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1588 {
1589 + if ((long)n < 0)
1590 + return n;
1591 +
1592 prefetch(from);
1593 if (access_ok(VERIFY_WRITE, to, n))
1594 __copy_user(to,from,n);
1595 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1596 unsigned long
1597 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1598 {
1599 + if ((long)n < 0)
1600 + return n;
1601 +
1602 prefetchw(to);
1603 if (access_ok(VERIFY_READ, from, n))
1604 __copy_user_zeroing(to,from,n);
1605 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
1606 index 1d93f81..67794d0 100644
1607 --- a/arch/mips/include/asm/atomic.h
1608 +++ b/arch/mips/include/asm/atomic.h
1609 @@ -21,6 +21,10 @@
1610 #include <asm/war.h>
1611 #include <asm/system.h>
1612
1613 +#ifdef CONFIG_GENERIC_ATOMIC64
1614 +#include <asm-generic/atomic64.h>
1615 +#endif
1616 +
1617 #define ATOMIC_INIT(i) { (i) }
1618
1619 /*
1620 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
1621 */
1622 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
1623
1624 +#define atomic64_read_unchecked(v) atomic64_read(v)
1625 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1626 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1627 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1628 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1629 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1630 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1631 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1632 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1633 +
1634 #endif /* CONFIG_64BIT */
1635
1636 /*
1637 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1638 index 455c0ac..ad65fbe 100644
1639 --- a/arch/mips/include/asm/elf.h
1640 +++ b/arch/mips/include/asm/elf.h
1641 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1642 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1643 #endif
1644
1645 +#ifdef CONFIG_PAX_ASLR
1646 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1647 +
1648 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1649 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1650 +#endif
1651 +
1652 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1653 struct linux_binprm;
1654 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1655 int uses_interp);
1656
1657 -struct mm_struct;
1658 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1659 -#define arch_randomize_brk arch_randomize_brk
1660 -
1661 #endif /* _ASM_ELF_H */
1662 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1663 index e59cd1a..8e329d6 100644
1664 --- a/arch/mips/include/asm/page.h
1665 +++ b/arch/mips/include/asm/page.h
1666 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1667 #ifdef CONFIG_CPU_MIPS32
1668 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1669 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1670 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1671 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1672 #else
1673 typedef struct { unsigned long long pte; } pte_t;
1674 #define pte_val(x) ((x).pte)
1675 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1676 index 6018c80..7c37203 100644
1677 --- a/arch/mips/include/asm/system.h
1678 +++ b/arch/mips/include/asm/system.h
1679 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1680 */
1681 #define __ARCH_WANT_UNLOCKED_CTXSW
1682
1683 -extern unsigned long arch_align_stack(unsigned long sp);
1684 +#define arch_align_stack(x) ((x) & ~0xfUL)
1685
1686 #endif /* _ASM_SYSTEM_H */
1687 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1688 index 9fdd8bc..4bd7f1a 100644
1689 --- a/arch/mips/kernel/binfmt_elfn32.c
1690 +++ b/arch/mips/kernel/binfmt_elfn32.c
1691 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1692 #undef ELF_ET_DYN_BASE
1693 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1694
1695 +#ifdef CONFIG_PAX_ASLR
1696 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1697 +
1698 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1699 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1700 +#endif
1701 +
1702 #include <asm/processor.h>
1703 #include <linux/module.h>
1704 #include <linux/elfcore.h>
1705 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1706 index ff44823..97f8906 100644
1707 --- a/arch/mips/kernel/binfmt_elfo32.c
1708 +++ b/arch/mips/kernel/binfmt_elfo32.c
1709 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1710 #undef ELF_ET_DYN_BASE
1711 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1712
1713 +#ifdef CONFIG_PAX_ASLR
1714 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1715 +
1716 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1717 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1718 +#endif
1719 +
1720 #include <asm/processor.h>
1721
1722 /*
1723 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1724 index c47f96e..661d418 100644
1725 --- a/arch/mips/kernel/process.c
1726 +++ b/arch/mips/kernel/process.c
1727 @@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1728 out:
1729 return pc;
1730 }
1731 -
1732 -/*
1733 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1734 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1735 - */
1736 -unsigned long arch_align_stack(unsigned long sp)
1737 -{
1738 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1739 - sp -= get_random_int() & ~PAGE_MASK;
1740 -
1741 - return sp & ALMASK;
1742 -}
1743 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1744 index 937cf33..adb39bb 100644
1745 --- a/arch/mips/mm/fault.c
1746 +++ b/arch/mips/mm/fault.c
1747 @@ -28,6 +28,23 @@
1748 #include <asm/highmem.h> /* For VMALLOC_END */
1749 #include <linux/kdebug.h>
1750
1751 +#ifdef CONFIG_PAX_PAGEEXEC
1752 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1753 +{
1754 + unsigned long i;
1755 +
1756 + printk(KERN_ERR "PAX: bytes at PC: ");
1757 + for (i = 0; i < 5; i++) {
1758 + unsigned int c;
1759 + if (get_user(c, (unsigned int *)pc+i))
1760 + printk(KERN_CONT "???????? ");
1761 + else
1762 + printk(KERN_CONT "%08x ", c);
1763 + }
1764 + printk("\n");
1765 +}
1766 +#endif
1767 +
1768 /*
1769 * This routine handles page faults. It determines the address,
1770 * and the problem, and then passes it off to one of the appropriate
1771 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1772 index 302d779..7d35bf8 100644
1773 --- a/arch/mips/mm/mmap.c
1774 +++ b/arch/mips/mm/mmap.c
1775 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1776 do_color_align = 1;
1777
1778 /* requesting a specific address */
1779 +
1780 +#ifdef CONFIG_PAX_RANDMMAP
1781 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1782 +#endif
1783 +
1784 if (addr) {
1785 if (do_color_align)
1786 addr = COLOUR_ALIGN(addr, pgoff);
1787 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1788 addr = PAGE_ALIGN(addr);
1789
1790 vma = find_vma(mm, addr);
1791 - if (TASK_SIZE - len >= addr &&
1792 - (!vma || addr + len <= vma->vm_start))
1793 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1794 return addr;
1795 }
1796
1797 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1798 /* At this point: (!vma || addr < vma->vm_end). */
1799 if (TASK_SIZE - len < addr)
1800 return -ENOMEM;
1801 - if (!vma || addr + len <= vma->vm_start)
1802 + if (check_heap_stack_gap(vma, addr, len))
1803 return addr;
1804 addr = vma->vm_end;
1805 if (do_color_align)
1806 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1807 /* make sure it can fit in the remaining address space */
1808 if (likely(addr > len)) {
1809 vma = find_vma(mm, addr - len);
1810 - if (!vma || addr <= vma->vm_start) {
1811 + if (check_heap_stack_gap(vma, addr - len, len)) {
1812 /* cache the address as a hint for next time */
1813 return mm->free_area_cache = addr - len;
1814 }
1815 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1816 * return with success:
1817 */
1818 vma = find_vma(mm, addr);
1819 - if (likely(!vma || addr + len <= vma->vm_start)) {
1820 + if (check_heap_stack_gap(vma, addr, len)) {
1821 /* cache the address as a hint for next time */
1822 return mm->free_area_cache = addr;
1823 }
1824 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1825 mm->unmap_area = arch_unmap_area_topdown;
1826 }
1827 }
1828 -
1829 -static inline unsigned long brk_rnd(void)
1830 -{
1831 - unsigned long rnd = get_random_int();
1832 -
1833 - rnd = rnd << PAGE_SHIFT;
1834 - /* 8MB for 32bit, 256MB for 64bit */
1835 - if (TASK_IS_32BIT_ADDR)
1836 - rnd = rnd & 0x7ffffful;
1837 - else
1838 - rnd = rnd & 0xffffffful;
1839 -
1840 - return rnd;
1841 -}
1842 -
1843 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1844 -{
1845 - unsigned long base = mm->brk;
1846 - unsigned long ret;
1847 -
1848 - ret = PAGE_ALIGN(base + brk_rnd());
1849 -
1850 - if (ret < mm->brk)
1851 - return mm->brk;
1852 -
1853 - return ret;
1854 -}
1855 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
1856 index 4054b31..a10c105 100644
1857 --- a/arch/parisc/include/asm/atomic.h
1858 +++ b/arch/parisc/include/asm/atomic.h
1859 @@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
1860
1861 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
1862
1863 +#define atomic64_read_unchecked(v) atomic64_read(v)
1864 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1865 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1866 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1867 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1868 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1869 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1870 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1871 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1872 +
1873 #endif /* !CONFIG_64BIT */
1874
1875
1876 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1877 index 19f6cb1..6c78cf2 100644
1878 --- a/arch/parisc/include/asm/elf.h
1879 +++ b/arch/parisc/include/asm/elf.h
1880 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1881
1882 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1883
1884 +#ifdef CONFIG_PAX_ASLR
1885 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1886 +
1887 +#define PAX_DELTA_MMAP_LEN 16
1888 +#define PAX_DELTA_STACK_LEN 16
1889 +#endif
1890 +
1891 /* This yields a mask that user programs can use to figure out what
1892 instruction set this CPU supports. This could be done in user space,
1893 but it's not easy, and we've already done it here. */
1894 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1895 index 22dadeb..f6c2be4 100644
1896 --- a/arch/parisc/include/asm/pgtable.h
1897 +++ b/arch/parisc/include/asm/pgtable.h
1898 @@ -210,6 +210,17 @@ struct vm_area_struct;
1899 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1900 #define PAGE_COPY PAGE_EXECREAD
1901 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1902 +
1903 +#ifdef CONFIG_PAX_PAGEEXEC
1904 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1905 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1906 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1907 +#else
1908 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1909 +# define PAGE_COPY_NOEXEC PAGE_COPY
1910 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1911 +#endif
1912 +
1913 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1914 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1915 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1916 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1917 index 5e34ccf..672bc9c 100644
1918 --- a/arch/parisc/kernel/module.c
1919 +++ b/arch/parisc/kernel/module.c
1920 @@ -98,16 +98,38 @@
1921
1922 /* three functions to determine where in the module core
1923 * or init pieces the location is */
1924 +static inline int in_init_rx(struct module *me, void *loc)
1925 +{
1926 + return (loc >= me->module_init_rx &&
1927 + loc < (me->module_init_rx + me->init_size_rx));
1928 +}
1929 +
1930 +static inline int in_init_rw(struct module *me, void *loc)
1931 +{
1932 + return (loc >= me->module_init_rw &&
1933 + loc < (me->module_init_rw + me->init_size_rw));
1934 +}
1935 +
1936 static inline int in_init(struct module *me, void *loc)
1937 {
1938 - return (loc >= me->module_init &&
1939 - loc <= (me->module_init + me->init_size));
1940 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1941 +}
1942 +
1943 +static inline int in_core_rx(struct module *me, void *loc)
1944 +{
1945 + return (loc >= me->module_core_rx &&
1946 + loc < (me->module_core_rx + me->core_size_rx));
1947 +}
1948 +
1949 +static inline int in_core_rw(struct module *me, void *loc)
1950 +{
1951 + return (loc >= me->module_core_rw &&
1952 + loc < (me->module_core_rw + me->core_size_rw));
1953 }
1954
1955 static inline int in_core(struct module *me, void *loc)
1956 {
1957 - return (loc >= me->module_core &&
1958 - loc <= (me->module_core + me->core_size));
1959 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1960 }
1961
1962 static inline int in_local(struct module *me, void *loc)
1963 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1964 }
1965
1966 /* align things a bit */
1967 - me->core_size = ALIGN(me->core_size, 16);
1968 - me->arch.got_offset = me->core_size;
1969 - me->core_size += gots * sizeof(struct got_entry);
1970 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1971 + me->arch.got_offset = me->core_size_rw;
1972 + me->core_size_rw += gots * sizeof(struct got_entry);
1973
1974 - me->core_size = ALIGN(me->core_size, 16);
1975 - me->arch.fdesc_offset = me->core_size;
1976 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1977 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1978 + me->arch.fdesc_offset = me->core_size_rw;
1979 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1980
1981 me->arch.got_max = gots;
1982 me->arch.fdesc_max = fdescs;
1983 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1984
1985 BUG_ON(value == 0);
1986
1987 - got = me->module_core + me->arch.got_offset;
1988 + got = me->module_core_rw + me->arch.got_offset;
1989 for (i = 0; got[i].addr; i++)
1990 if (got[i].addr == value)
1991 goto out;
1992 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1993 #ifdef CONFIG_64BIT
1994 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1995 {
1996 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1997 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1998
1999 if (!value) {
2000 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2001 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2002
2003 /* Create new one */
2004 fdesc->addr = value;
2005 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2006 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2007 return (Elf_Addr)fdesc;
2008 }
2009 #endif /* CONFIG_64BIT */
2010 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
2011
2012 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2013 end = table + sechdrs[me->arch.unwind_section].sh_size;
2014 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2015 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2016
2017 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2018 me->arch.unwind_section, table, end, gp);
2019 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2020 index c9b9322..02d8940 100644
2021 --- a/arch/parisc/kernel/sys_parisc.c
2022 +++ b/arch/parisc/kernel/sys_parisc.c
2023 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2024 /* At this point: (!vma || addr < vma->vm_end). */
2025 if (TASK_SIZE - len < addr)
2026 return -ENOMEM;
2027 - if (!vma || addr + len <= vma->vm_start)
2028 + if (check_heap_stack_gap(vma, addr, len))
2029 return addr;
2030 addr = vma->vm_end;
2031 }
2032 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2033 /* At this point: (!vma || addr < vma->vm_end). */
2034 if (TASK_SIZE - len < addr)
2035 return -ENOMEM;
2036 - if (!vma || addr + len <= vma->vm_start)
2037 + if (check_heap_stack_gap(vma, addr, len))
2038 return addr;
2039 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2040 if (addr < vma->vm_end) /* handle wraparound */
2041 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2042 if (flags & MAP_FIXED)
2043 return addr;
2044 if (!addr)
2045 - addr = TASK_UNMAPPED_BASE;
2046 + addr = current->mm->mmap_base;
2047
2048 if (filp) {
2049 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2050 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2051 index f19e660..414fe24 100644
2052 --- a/arch/parisc/kernel/traps.c
2053 +++ b/arch/parisc/kernel/traps.c
2054 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2055
2056 down_read(&current->mm->mmap_sem);
2057 vma = find_vma(current->mm,regs->iaoq[0]);
2058 - if (vma && (regs->iaoq[0] >= vma->vm_start)
2059 - && (vma->vm_flags & VM_EXEC)) {
2060 -
2061 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2062 fault_address = regs->iaoq[0];
2063 fault_space = regs->iasq[0];
2064
2065 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2066 index 18162ce..94de376 100644
2067 --- a/arch/parisc/mm/fault.c
2068 +++ b/arch/parisc/mm/fault.c
2069 @@ -15,6 +15,7 @@
2070 #include <linux/sched.h>
2071 #include <linux/interrupt.h>
2072 #include <linux/module.h>
2073 +#include <linux/unistd.h>
2074
2075 #include <asm/uaccess.h>
2076 #include <asm/traps.h>
2077 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2078 static unsigned long
2079 parisc_acctyp(unsigned long code, unsigned int inst)
2080 {
2081 - if (code == 6 || code == 16)
2082 + if (code == 6 || code == 7 || code == 16)
2083 return VM_EXEC;
2084
2085 switch (inst & 0xf0000000) {
2086 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2087 }
2088 #endif
2089
2090 +#ifdef CONFIG_PAX_PAGEEXEC
2091 +/*
2092 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2093 + *
2094 + * returns 1 when task should be killed
2095 + * 2 when rt_sigreturn trampoline was detected
2096 + * 3 when unpatched PLT trampoline was detected
2097 + */
2098 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2099 +{
2100 +
2101 +#ifdef CONFIG_PAX_EMUPLT
2102 + int err;
2103 +
2104 + do { /* PaX: unpatched PLT emulation */
2105 + unsigned int bl, depwi;
2106 +
2107 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2108 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2109 +
2110 + if (err)
2111 + break;
2112 +
2113 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2114 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2115 +
2116 + err = get_user(ldw, (unsigned int *)addr);
2117 + err |= get_user(bv, (unsigned int *)(addr+4));
2118 + err |= get_user(ldw2, (unsigned int *)(addr+8));
2119 +
2120 + if (err)
2121 + break;
2122 +
2123 + if (ldw == 0x0E801096U &&
2124 + bv == 0xEAC0C000U &&
2125 + ldw2 == 0x0E881095U)
2126 + {
2127 + unsigned int resolver, map;
2128 +
2129 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2130 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2131 + if (err)
2132 + break;
2133 +
2134 + regs->gr[20] = instruction_pointer(regs)+8;
2135 + regs->gr[21] = map;
2136 + regs->gr[22] = resolver;
2137 + regs->iaoq[0] = resolver | 3UL;
2138 + regs->iaoq[1] = regs->iaoq[0] + 4;
2139 + return 3;
2140 + }
2141 + }
2142 + } while (0);
2143 +#endif
2144 +
2145 +#ifdef CONFIG_PAX_EMUTRAMP
2146 +
2147 +#ifndef CONFIG_PAX_EMUSIGRT
2148 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2149 + return 1;
2150 +#endif
2151 +
2152 + do { /* PaX: rt_sigreturn emulation */
2153 + unsigned int ldi1, ldi2, bel, nop;
2154 +
2155 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2156 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2157 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2158 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2159 +
2160 + if (err)
2161 + break;
2162 +
2163 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2164 + ldi2 == 0x3414015AU &&
2165 + bel == 0xE4008200U &&
2166 + nop == 0x08000240U)
2167 + {
2168 + regs->gr[25] = (ldi1 & 2) >> 1;
2169 + regs->gr[20] = __NR_rt_sigreturn;
2170 + regs->gr[31] = regs->iaoq[1] + 16;
2171 + regs->sr[0] = regs->iasq[1];
2172 + regs->iaoq[0] = 0x100UL;
2173 + regs->iaoq[1] = regs->iaoq[0] + 4;
2174 + regs->iasq[0] = regs->sr[2];
2175 + regs->iasq[1] = regs->sr[2];
2176 + return 2;
2177 + }
2178 + } while (0);
2179 +#endif
2180 +
2181 + return 1;
2182 +}
2183 +
2184 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2185 +{
2186 + unsigned long i;
2187 +
2188 + printk(KERN_ERR "PAX: bytes at PC: ");
2189 + for (i = 0; i < 5; i++) {
2190 + unsigned int c;
2191 + if (get_user(c, (unsigned int *)pc+i))
2192 + printk(KERN_CONT "???????? ");
2193 + else
2194 + printk(KERN_CONT "%08x ", c);
2195 + }
2196 + printk("\n");
2197 +}
2198 +#endif
2199 +
2200 int fixup_exception(struct pt_regs *regs)
2201 {
2202 const struct exception_table_entry *fix;
2203 @@ -192,8 +303,33 @@ good_area:
2204
2205 acc_type = parisc_acctyp(code,regs->iir);
2206
2207 - if ((vma->vm_flags & acc_type) != acc_type)
2208 + if ((vma->vm_flags & acc_type) != acc_type) {
2209 +
2210 +#ifdef CONFIG_PAX_PAGEEXEC
2211 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2212 + (address & ~3UL) == instruction_pointer(regs))
2213 + {
2214 + up_read(&mm->mmap_sem);
2215 + switch (pax_handle_fetch_fault(regs)) {
2216 +
2217 +#ifdef CONFIG_PAX_EMUPLT
2218 + case 3:
2219 + return;
2220 +#endif
2221 +
2222 +#ifdef CONFIG_PAX_EMUTRAMP
2223 + case 2:
2224 + return;
2225 +#endif
2226 +
2227 + }
2228 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2229 + do_group_exit(SIGKILL);
2230 + }
2231 +#endif
2232 +
2233 goto bad_area;
2234 + }
2235
2236 /*
2237 * If for any reason at all we couldn't handle the fault, make
2238 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
2239 index 02e41b5..ec6e26c 100644
2240 --- a/arch/powerpc/include/asm/atomic.h
2241 +++ b/arch/powerpc/include/asm/atomic.h
2242 @@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2243
2244 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2245
2246 +#define atomic64_read_unchecked(v) atomic64_read(v)
2247 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2248 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2249 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2250 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2251 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2252 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2253 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2254 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2255 +
2256 #endif /* __powerpc64__ */
2257
2258 #endif /* __KERNEL__ */
2259 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2260 index 3bf9cca..e7457d0 100644
2261 --- a/arch/powerpc/include/asm/elf.h
2262 +++ b/arch/powerpc/include/asm/elf.h
2263 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2264 the loader. We need to make sure that it is out of the way of the program
2265 that it will "exec", and that there is sufficient room for the brk. */
2266
2267 -extern unsigned long randomize_et_dyn(unsigned long base);
2268 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2269 +#define ELF_ET_DYN_BASE (0x20000000)
2270 +
2271 +#ifdef CONFIG_PAX_ASLR
2272 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2273 +
2274 +#ifdef __powerpc64__
2275 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2276 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2277 +#else
2278 +#define PAX_DELTA_MMAP_LEN 15
2279 +#define PAX_DELTA_STACK_LEN 15
2280 +#endif
2281 +#endif
2282
2283 /*
2284 * Our registers are always unsigned longs, whether we're a 32 bit
2285 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2286 (0x7ff >> (PAGE_SHIFT - 12)) : \
2287 (0x3ffff >> (PAGE_SHIFT - 12)))
2288
2289 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2290 -#define arch_randomize_brk arch_randomize_brk
2291 -
2292 #endif /* __KERNEL__ */
2293
2294 /*
2295 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2296 index bca8fdc..61e9580 100644
2297 --- a/arch/powerpc/include/asm/kmap_types.h
2298 +++ b/arch/powerpc/include/asm/kmap_types.h
2299 @@ -27,6 +27,7 @@ enum km_type {
2300 KM_PPC_SYNC_PAGE,
2301 KM_PPC_SYNC_ICACHE,
2302 KM_KDB,
2303 + KM_CLEARPAGE,
2304 KM_TYPE_NR
2305 };
2306
2307 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2308 index d4a7f64..451de1c 100644
2309 --- a/arch/powerpc/include/asm/mman.h
2310 +++ b/arch/powerpc/include/asm/mman.h
2311 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2312 }
2313 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2314
2315 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2316 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2317 {
2318 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2319 }
2320 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2321 index dd9c4fd..a2ced87 100644
2322 --- a/arch/powerpc/include/asm/page.h
2323 +++ b/arch/powerpc/include/asm/page.h
2324 @@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
2325 * and needs to be executable. This means the whole heap ends
2326 * up being executable.
2327 */
2328 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2329 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2330 +#define VM_DATA_DEFAULT_FLAGS32 \
2331 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2332 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2333
2334 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2335 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2336 @@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
2337 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2338 #endif
2339
2340 +#define ktla_ktva(addr) (addr)
2341 +#define ktva_ktla(addr) (addr)
2342 +
2343 /*
2344 * Use the top bit of the higher-level page table entries to indicate whether
2345 * the entries we point to contain hugepages. This works because we know that
2346 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2347 index fb40ede..d3ce956 100644
2348 --- a/arch/powerpc/include/asm/page_64.h
2349 +++ b/arch/powerpc/include/asm/page_64.h
2350 @@ -144,15 +144,18 @@ do { \
2351 * stack by default, so in the absence of a PT_GNU_STACK program header
2352 * we turn execute permission off.
2353 */
2354 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2355 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2356 +#define VM_STACK_DEFAULT_FLAGS32 \
2357 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2358 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2359
2360 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2361 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2362
2363 +#ifndef CONFIG_PAX_PAGEEXEC
2364 #define VM_STACK_DEFAULT_FLAGS \
2365 (is_32bit_task() ? \
2366 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2367 +#endif
2368
2369 #include <asm-generic/getorder.h>
2370
2371 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2372 index 88b0bd9..e32bc67 100644
2373 --- a/arch/powerpc/include/asm/pgtable.h
2374 +++ b/arch/powerpc/include/asm/pgtable.h
2375 @@ -2,6 +2,7 @@
2376 #define _ASM_POWERPC_PGTABLE_H
2377 #ifdef __KERNEL__
2378
2379 +#include <linux/const.h>
2380 #ifndef __ASSEMBLY__
2381 #include <asm/processor.h> /* For TASK_SIZE */
2382 #include <asm/mmu.h>
2383 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2384 index 4aad413..85d86bf 100644
2385 --- a/arch/powerpc/include/asm/pte-hash32.h
2386 +++ b/arch/powerpc/include/asm/pte-hash32.h
2387 @@ -21,6 +21,7 @@
2388 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2389 #define _PAGE_USER 0x004 /* usermode access allowed */
2390 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2391 +#define _PAGE_EXEC _PAGE_GUARDED
2392 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2393 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2394 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2395 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2396 index 559da19..7e5835c 100644
2397 --- a/arch/powerpc/include/asm/reg.h
2398 +++ b/arch/powerpc/include/asm/reg.h
2399 @@ -212,6 +212,7 @@
2400 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2401 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2402 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2403 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2404 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2405 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2406 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2407 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2408 index e30a13d..2b7d994 100644
2409 --- a/arch/powerpc/include/asm/system.h
2410 +++ b/arch/powerpc/include/asm/system.h
2411 @@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2412 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2413 #endif
2414
2415 -extern unsigned long arch_align_stack(unsigned long sp);
2416 +#define arch_align_stack(x) ((x) & ~0xfUL)
2417
2418 /* Used in very early kernel initialization. */
2419 extern unsigned long reloc_offset(void);
2420 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2421 index bd0fb84..a42a14b 100644
2422 --- a/arch/powerpc/include/asm/uaccess.h
2423 +++ b/arch/powerpc/include/asm/uaccess.h
2424 @@ -13,6 +13,8 @@
2425 #define VERIFY_READ 0
2426 #define VERIFY_WRITE 1
2427
2428 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2429 +
2430 /*
2431 * The fs value determines whether argument validity checking should be
2432 * performed or not. If get_fs() == USER_DS, checking is performed, with
2433 @@ -327,52 +329,6 @@ do { \
2434 extern unsigned long __copy_tofrom_user(void __user *to,
2435 const void __user *from, unsigned long size);
2436
2437 -#ifndef __powerpc64__
2438 -
2439 -static inline unsigned long copy_from_user(void *to,
2440 - const void __user *from, unsigned long n)
2441 -{
2442 - unsigned long over;
2443 -
2444 - if (access_ok(VERIFY_READ, from, n))
2445 - return __copy_tofrom_user((__force void __user *)to, from, n);
2446 - if ((unsigned long)from < TASK_SIZE) {
2447 - over = (unsigned long)from + n - TASK_SIZE;
2448 - return __copy_tofrom_user((__force void __user *)to, from,
2449 - n - over) + over;
2450 - }
2451 - return n;
2452 -}
2453 -
2454 -static inline unsigned long copy_to_user(void __user *to,
2455 - const void *from, unsigned long n)
2456 -{
2457 - unsigned long over;
2458 -
2459 - if (access_ok(VERIFY_WRITE, to, n))
2460 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2461 - if ((unsigned long)to < TASK_SIZE) {
2462 - over = (unsigned long)to + n - TASK_SIZE;
2463 - return __copy_tofrom_user(to, (__force void __user *)from,
2464 - n - over) + over;
2465 - }
2466 - return n;
2467 -}
2468 -
2469 -#else /* __powerpc64__ */
2470 -
2471 -#define __copy_in_user(to, from, size) \
2472 - __copy_tofrom_user((to), (from), (size))
2473 -
2474 -extern unsigned long copy_from_user(void *to, const void __user *from,
2475 - unsigned long n);
2476 -extern unsigned long copy_to_user(void __user *to, const void *from,
2477 - unsigned long n);
2478 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2479 - unsigned long n);
2480 -
2481 -#endif /* __powerpc64__ */
2482 -
2483 static inline unsigned long __copy_from_user_inatomic(void *to,
2484 const void __user *from, unsigned long n)
2485 {
2486 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2487 if (ret == 0)
2488 return 0;
2489 }
2490 +
2491 + if (!__builtin_constant_p(n))
2492 + check_object_size(to, n, false);
2493 +
2494 return __copy_tofrom_user((__force void __user *)to, from, n);
2495 }
2496
2497 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2498 if (ret == 0)
2499 return 0;
2500 }
2501 +
2502 + if (!__builtin_constant_p(n))
2503 + check_object_size(from, n, true);
2504 +
2505 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2506 }
2507
2508 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2509 return __copy_to_user_inatomic(to, from, size);
2510 }
2511
2512 +#ifndef __powerpc64__
2513 +
2514 +static inline unsigned long __must_check copy_from_user(void *to,
2515 + const void __user *from, unsigned long n)
2516 +{
2517 + unsigned long over;
2518 +
2519 + if ((long)n < 0)
2520 + return n;
2521 +
2522 + if (access_ok(VERIFY_READ, from, n)) {
2523 + if (!__builtin_constant_p(n))
2524 + check_object_size(to, n, false);
2525 + return __copy_tofrom_user((__force void __user *)to, from, n);
2526 + }
2527 + if ((unsigned long)from < TASK_SIZE) {
2528 + over = (unsigned long)from + n - TASK_SIZE;
2529 + if (!__builtin_constant_p(n - over))
2530 + check_object_size(to, n - over, false);
2531 + return __copy_tofrom_user((__force void __user *)to, from,
2532 + n - over) + over;
2533 + }
2534 + return n;
2535 +}
2536 +
2537 +static inline unsigned long __must_check copy_to_user(void __user *to,
2538 + const void *from, unsigned long n)
2539 +{
2540 + unsigned long over;
2541 +
2542 + if ((long)n < 0)
2543 + return n;
2544 +
2545 + if (access_ok(VERIFY_WRITE, to, n)) {
2546 + if (!__builtin_constant_p(n))
2547 + check_object_size(from, n, true);
2548 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2549 + }
2550 + if ((unsigned long)to < TASK_SIZE) {
2551 + over = (unsigned long)to + n - TASK_SIZE;
2552 + if (!__builtin_constant_p(n - over))
2553 + check_object_size(from, n - over, true);
2554 + return __copy_tofrom_user(to, (__force void __user *)from,
2555 + n - over) + over;
2556 + }
2557 + return n;
2558 +}
2559 +
2560 +#else /* __powerpc64__ */
2561 +
2562 +#define __copy_in_user(to, from, size) \
2563 + __copy_tofrom_user((to), (from), (size))
2564 +
2565 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2566 +{
2567 + if ((long)n < 0 || n > INT_MAX)
2568 + return n;
2569 +
2570 + if (!__builtin_constant_p(n))
2571 + check_object_size(to, n, false);
2572 +
2573 + if (likely(access_ok(VERIFY_READ, from, n)))
2574 + n = __copy_from_user(to, from, n);
2575 + else
2576 + memset(to, 0, n);
2577 + return n;
2578 +}
2579 +
2580 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2581 +{
2582 + if ((long)n < 0 || n > INT_MAX)
2583 + return n;
2584 +
2585 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2586 + if (!__builtin_constant_p(n))
2587 + check_object_size(from, n, true);
2588 + n = __copy_to_user(to, from, n);
2589 + }
2590 + return n;
2591 +}
2592 +
2593 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2594 + unsigned long n);
2595 +
2596 +#endif /* __powerpc64__ */
2597 +
2598 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2599
2600 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2601 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2602 index 429983c..7af363b 100644
2603 --- a/arch/powerpc/kernel/exceptions-64e.S
2604 +++ b/arch/powerpc/kernel/exceptions-64e.S
2605 @@ -587,6 +587,7 @@ storage_fault_common:
2606 std r14,_DAR(r1)
2607 std r15,_DSISR(r1)
2608 addi r3,r1,STACK_FRAME_OVERHEAD
2609 + bl .save_nvgprs
2610 mr r4,r14
2611 mr r5,r15
2612 ld r14,PACA_EXGEN+EX_R14(r13)
2613 @@ -596,8 +597,7 @@ storage_fault_common:
2614 cmpdi r3,0
2615 bne- 1f
2616 b .ret_from_except_lite
2617 -1: bl .save_nvgprs
2618 - mr r5,r3
2619 +1: mr r5,r3
2620 addi r3,r1,STACK_FRAME_OVERHEAD
2621 ld r4,_DAR(r1)
2622 bl .bad_page_fault
2623 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2624 index cf9c69b..ebc9640 100644
2625 --- a/arch/powerpc/kernel/exceptions-64s.S
2626 +++ b/arch/powerpc/kernel/exceptions-64s.S
2627 @@ -1004,10 +1004,10 @@ handle_page_fault:
2628 11: ld r4,_DAR(r1)
2629 ld r5,_DSISR(r1)
2630 addi r3,r1,STACK_FRAME_OVERHEAD
2631 + bl .save_nvgprs
2632 bl .do_page_fault
2633 cmpdi r3,0
2634 beq+ 13f
2635 - bl .save_nvgprs
2636 mr r5,r3
2637 addi r3,r1,STACK_FRAME_OVERHEAD
2638 lwz r4,_DAR(r1)
2639 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2640 index 0b6d796..d760ddb 100644
2641 --- a/arch/powerpc/kernel/module_32.c
2642 +++ b/arch/powerpc/kernel/module_32.c
2643 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2644 me->arch.core_plt_section = i;
2645 }
2646 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2647 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2648 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2649 return -ENOEXEC;
2650 }
2651
2652 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2653
2654 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2655 /* Init, or core PLT? */
2656 - if (location >= mod->module_core
2657 - && location < mod->module_core + mod->core_size)
2658 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2659 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2660 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2661 - else
2662 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2663 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2664 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2665 + else {
2666 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2667 + return ~0UL;
2668 + }
2669
2670 /* Find this entry, or if that fails, the next avail. entry */
2671 while (entry->jump[0]) {
2672 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2673 index 6457574..08b28d3 100644
2674 --- a/arch/powerpc/kernel/process.c
2675 +++ b/arch/powerpc/kernel/process.c
2676 @@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
2677 * Lookup NIP late so we have the best change of getting the
2678 * above info out without failing
2679 */
2680 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2681 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2682 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2683 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2684 #endif
2685 show_stack(current, (unsigned long *) regs->gpr[1]);
2686 if (!user_mode(regs))
2687 @@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2688 newsp = stack[0];
2689 ip = stack[STACK_FRAME_LR_SAVE];
2690 if (!firstframe || ip != lr) {
2691 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2692 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2693 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2694 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2695 - printk(" (%pS)",
2696 + printk(" (%pA)",
2697 (void *)current->ret_stack[curr_frame].ret);
2698 curr_frame--;
2699 }
2700 @@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2701 struct pt_regs *regs = (struct pt_regs *)
2702 (sp + STACK_FRAME_OVERHEAD);
2703 lr = regs->link;
2704 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2705 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2706 regs->trap, (void *)regs->nip, (void *)lr);
2707 firstframe = 1;
2708 }
2709 @@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
2710 }
2711
2712 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2713 -
2714 -unsigned long arch_align_stack(unsigned long sp)
2715 -{
2716 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2717 - sp -= get_random_int() & ~PAGE_MASK;
2718 - return sp & ~0xf;
2719 -}
2720 -
2721 -static inline unsigned long brk_rnd(void)
2722 -{
2723 - unsigned long rnd = 0;
2724 -
2725 - /* 8MB for 32bit, 1GB for 64bit */
2726 - if (is_32bit_task())
2727 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2728 - else
2729 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2730 -
2731 - return rnd << PAGE_SHIFT;
2732 -}
2733 -
2734 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2735 -{
2736 - unsigned long base = mm->brk;
2737 - unsigned long ret;
2738 -
2739 -#ifdef CONFIG_PPC_STD_MMU_64
2740 - /*
2741 - * If we are using 1TB segments and we are allowed to randomise
2742 - * the heap, we can put it above 1TB so it is backed by a 1TB
2743 - * segment. Otherwise the heap will be in the bottom 1TB
2744 - * which always uses 256MB segments and this may result in a
2745 - * performance penalty.
2746 - */
2747 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2748 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2749 -#endif
2750 -
2751 - ret = PAGE_ALIGN(base + brk_rnd());
2752 -
2753 - if (ret < mm->brk)
2754 - return mm->brk;
2755 -
2756 - return ret;
2757 -}
2758 -
2759 -unsigned long randomize_et_dyn(unsigned long base)
2760 -{
2761 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2762 -
2763 - if (ret < base)
2764 - return base;
2765 -
2766 - return ret;
2767 -}
2768 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2769 index 836a5a1..27289a3 100644
2770 --- a/arch/powerpc/kernel/signal_32.c
2771 +++ b/arch/powerpc/kernel/signal_32.c
2772 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2773 /* Save user registers on the stack */
2774 frame = &rt_sf->uc.uc_mcontext;
2775 addr = frame;
2776 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2777 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2778 if (save_user_regs(regs, frame, 0, 1))
2779 goto badframe;
2780 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2781 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2782 index a50b5ec..547078a 100644
2783 --- a/arch/powerpc/kernel/signal_64.c
2784 +++ b/arch/powerpc/kernel/signal_64.c
2785 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2786 current->thread.fpscr.val = 0;
2787
2788 /* Set up to return from userspace. */
2789 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2790 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2791 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2792 } else {
2793 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2794 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2795 index 5459d14..10f8070 100644
2796 --- a/arch/powerpc/kernel/traps.c
2797 +++ b/arch/powerpc/kernel/traps.c
2798 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2799 static inline void pmac_backlight_unblank(void) { }
2800 #endif
2801
2802 +extern void gr_handle_kernel_exploit(void);
2803 +
2804 int die(const char *str, struct pt_regs *regs, long err)
2805 {
2806 static struct {
2807 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2808 if (panic_on_oops)
2809 panic("Fatal exception");
2810
2811 + gr_handle_kernel_exploit();
2812 +
2813 oops_exit();
2814 do_exit(err);
2815
2816 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2817 index 7d14bb6..1305601 100644
2818 --- a/arch/powerpc/kernel/vdso.c
2819 +++ b/arch/powerpc/kernel/vdso.c
2820 @@ -35,6 +35,7 @@
2821 #include <asm/firmware.h>
2822 #include <asm/vdso.h>
2823 #include <asm/vdso_datapage.h>
2824 +#include <asm/mman.h>
2825
2826 #include "setup.h"
2827
2828 @@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2829 vdso_base = VDSO32_MBASE;
2830 #endif
2831
2832 - current->mm->context.vdso_base = 0;
2833 + current->mm->context.vdso_base = ~0UL;
2834
2835 /* vDSO has a problem and was disabled, just don't "enable" it for the
2836 * process
2837 @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2838 vdso_base = get_unmapped_area(NULL, vdso_base,
2839 (vdso_pages << PAGE_SHIFT) +
2840 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2841 - 0, 0);
2842 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2843 if (IS_ERR_VALUE(vdso_base)) {
2844 rc = vdso_base;
2845 goto fail_mmapsem;
2846 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2847 index 5eea6f3..5d10396 100644
2848 --- a/arch/powerpc/lib/usercopy_64.c
2849 +++ b/arch/powerpc/lib/usercopy_64.c
2850 @@ -9,22 +9,6 @@
2851 #include <linux/module.h>
2852 #include <asm/uaccess.h>
2853
2854 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2855 -{
2856 - if (likely(access_ok(VERIFY_READ, from, n)))
2857 - n = __copy_from_user(to, from, n);
2858 - else
2859 - memset(to, 0, n);
2860 - return n;
2861 -}
2862 -
2863 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2864 -{
2865 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2866 - n = __copy_to_user(to, from, n);
2867 - return n;
2868 -}
2869 -
2870 unsigned long copy_in_user(void __user *to, const void __user *from,
2871 unsigned long n)
2872 {
2873 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2874 return n;
2875 }
2876
2877 -EXPORT_SYMBOL(copy_from_user);
2878 -EXPORT_SYMBOL(copy_to_user);
2879 EXPORT_SYMBOL(copy_in_user);
2880
2881 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2882 index 5efe8c9..db9ceef 100644
2883 --- a/arch/powerpc/mm/fault.c
2884 +++ b/arch/powerpc/mm/fault.c
2885 @@ -32,6 +32,10 @@
2886 #include <linux/perf_event.h>
2887 #include <linux/magic.h>
2888 #include <linux/ratelimit.h>
2889 +#include <linux/slab.h>
2890 +#include <linux/pagemap.h>
2891 +#include <linux/compiler.h>
2892 +#include <linux/unistd.h>
2893
2894 #include <asm/firmware.h>
2895 #include <asm/page.h>
2896 @@ -43,6 +47,7 @@
2897 #include <asm/tlbflush.h>
2898 #include <asm/siginfo.h>
2899 #include <mm/mmu_decl.h>
2900 +#include <asm/ptrace.h>
2901
2902 #ifdef CONFIG_KPROBES
2903 static inline int notify_page_fault(struct pt_regs *regs)
2904 @@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2905 }
2906 #endif
2907
2908 +#ifdef CONFIG_PAX_PAGEEXEC
2909 +/*
2910 + * PaX: decide what to do with offenders (regs->nip = fault address)
2911 + *
2912 + * returns 1 when task should be killed
2913 + */
2914 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2915 +{
2916 + return 1;
2917 +}
2918 +
2919 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2920 +{
2921 + unsigned long i;
2922 +
2923 + printk(KERN_ERR "PAX: bytes at PC: ");
2924 + for (i = 0; i < 5; i++) {
2925 + unsigned int c;
2926 + if (get_user(c, (unsigned int __user *)pc+i))
2927 + printk(KERN_CONT "???????? ");
2928 + else
2929 + printk(KERN_CONT "%08x ", c);
2930 + }
2931 + printk("\n");
2932 +}
2933 +#endif
2934 +
2935 /*
2936 * Check whether the instruction at regs->nip is a store using
2937 * an update addressing form which will update r1.
2938 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2939 * indicate errors in DSISR but can validly be set in SRR1.
2940 */
2941 if (trap == 0x400)
2942 - error_code &= 0x48200000;
2943 + error_code &= 0x58200000;
2944 else
2945 is_write = error_code & DSISR_ISSTORE;
2946 #else
2947 @@ -259,7 +291,7 @@ good_area:
2948 * "undefined". Of those that can be set, this is the only
2949 * one which seems bad.
2950 */
2951 - if (error_code & 0x10000000)
2952 + if (error_code & DSISR_GUARDED)
2953 /* Guarded storage error. */
2954 goto bad_area;
2955 #endif /* CONFIG_8xx */
2956 @@ -274,7 +306,7 @@ good_area:
2957 * processors use the same I/D cache coherency mechanism
2958 * as embedded.
2959 */
2960 - if (error_code & DSISR_PROTFAULT)
2961 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2962 goto bad_area;
2963 #endif /* CONFIG_PPC_STD_MMU */
2964
2965 @@ -343,6 +375,23 @@ bad_area:
2966 bad_area_nosemaphore:
2967 /* User mode accesses cause a SIGSEGV */
2968 if (user_mode(regs)) {
2969 +
2970 +#ifdef CONFIG_PAX_PAGEEXEC
2971 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2972 +#ifdef CONFIG_PPC_STD_MMU
2973 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2974 +#else
2975 + if (is_exec && regs->nip == address) {
2976 +#endif
2977 + switch (pax_handle_fetch_fault(regs)) {
2978 + }
2979 +
2980 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2981 + do_group_exit(SIGKILL);
2982 + }
2983 + }
2984 +#endif
2985 +
2986 _exception(SIGSEGV, regs, code, address);
2987 return 0;
2988 }
2989 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2990 index 5a783d8..c23e14b 100644
2991 --- a/arch/powerpc/mm/mmap_64.c
2992 +++ b/arch/powerpc/mm/mmap_64.c
2993 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2994 */
2995 if (mmap_is_legacy()) {
2996 mm->mmap_base = TASK_UNMAPPED_BASE;
2997 +
2998 +#ifdef CONFIG_PAX_RANDMMAP
2999 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3000 + mm->mmap_base += mm->delta_mmap;
3001 +#endif
3002 +
3003 mm->get_unmapped_area = arch_get_unmapped_area;
3004 mm->unmap_area = arch_unmap_area;
3005 } else {
3006 mm->mmap_base = mmap_base();
3007 +
3008 +#ifdef CONFIG_PAX_RANDMMAP
3009 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3010 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3011 +#endif
3012 +
3013 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3014 mm->unmap_area = arch_unmap_area_topdown;
3015 }
3016 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3017 index 73709f7..6b90313 100644
3018 --- a/arch/powerpc/mm/slice.c
3019 +++ b/arch/powerpc/mm/slice.c
3020 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3021 if ((mm->task_size - len) < addr)
3022 return 0;
3023 vma = find_vma(mm, addr);
3024 - return (!vma || (addr + len) <= vma->vm_start);
3025 + return check_heap_stack_gap(vma, addr, len);
3026 }
3027
3028 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3029 @@ -256,7 +256,7 @@ full_search:
3030 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3031 continue;
3032 }
3033 - if (!vma || addr + len <= vma->vm_start) {
3034 + if (check_heap_stack_gap(vma, addr, len)) {
3035 /*
3036 * Remember the place where we stopped the search:
3037 */
3038 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3039 }
3040 }
3041
3042 - addr = mm->mmap_base;
3043 - while (addr > len) {
3044 + if (mm->mmap_base < len)
3045 + addr = -ENOMEM;
3046 + else
3047 + addr = mm->mmap_base - len;
3048 +
3049 + while (!IS_ERR_VALUE(addr)) {
3050 /* Go down by chunk size */
3051 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3052 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3053
3054 /* Check for hit with different page size */
3055 mask = slice_range_to_mask(addr, len);
3056 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3057 * return with success:
3058 */
3059 vma = find_vma(mm, addr);
3060 - if (!vma || (addr + len) <= vma->vm_start) {
3061 + if (check_heap_stack_gap(vma, addr, len)) {
3062 /* remember the address as a hint for next time */
3063 if (use_cache)
3064 mm->free_area_cache = addr;
3065 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3066 mm->cached_hole_size = vma->vm_start - addr;
3067
3068 /* try just below the current vma->vm_start */
3069 - addr = vma->vm_start;
3070 + addr = skip_heap_stack_gap(vma, len);
3071 }
3072
3073 /*
3074 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3075 if (fixed && addr > (mm->task_size - len))
3076 return -EINVAL;
3077
3078 +#ifdef CONFIG_PAX_RANDMMAP
3079 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3080 + addr = 0;
3081 +#endif
3082 +
3083 /* If hint, make sure it matches our alignment restrictions */
3084 if (!fixed && addr) {
3085 addr = _ALIGN_UP(addr, 1ul << pshift);
3086 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
3087 index 8517d2a..d2738d4 100644
3088 --- a/arch/s390/include/asm/atomic.h
3089 +++ b/arch/s390/include/asm/atomic.h
3090 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
3091 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
3092 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3093
3094 +#define atomic64_read_unchecked(v) atomic64_read(v)
3095 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3096 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3097 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3098 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3099 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3100 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3101 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3102 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3103 +
3104 #define smp_mb__before_atomic_dec() smp_mb()
3105 #define smp_mb__after_atomic_dec() smp_mb()
3106 #define smp_mb__before_atomic_inc() smp_mb()
3107 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
3108 index 547f1a6..0b22b53 100644
3109 --- a/arch/s390/include/asm/elf.h
3110 +++ b/arch/s390/include/asm/elf.h
3111 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
3112 the loader. We need to make sure that it is out of the way of the program
3113 that it will "exec", and that there is sufficient room for the brk. */
3114
3115 -extern unsigned long randomize_et_dyn(unsigned long base);
3116 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
3117 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3118 +
3119 +#ifdef CONFIG_PAX_ASLR
3120 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3121 +
3122 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
3123 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
3124 +#endif
3125
3126 /* This yields a mask that user programs can use to figure out what
3127 instruction set this CPU supports. */
3128 @@ -211,7 +217,4 @@ struct linux_binprm;
3129 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
3130 int arch_setup_additional_pages(struct linux_binprm *, int);
3131
3132 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3133 -#define arch_randomize_brk arch_randomize_brk
3134 -
3135 #endif
3136 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
3137 index ef573c1..75a1ce6 100644
3138 --- a/arch/s390/include/asm/system.h
3139 +++ b/arch/s390/include/asm/system.h
3140 @@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
3141 extern void (*_machine_halt)(void);
3142 extern void (*_machine_power_off)(void);
3143
3144 -extern unsigned long arch_align_stack(unsigned long sp);
3145 +#define arch_align_stack(x) ((x) & ~0xfUL)
3146
3147 static inline int tprot(unsigned long addr)
3148 {
3149 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
3150 index 2b23885..e136e31 100644
3151 --- a/arch/s390/include/asm/uaccess.h
3152 +++ b/arch/s390/include/asm/uaccess.h
3153 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
3154 copy_to_user(void __user *to, const void *from, unsigned long n)
3155 {
3156 might_fault();
3157 +
3158 + if ((long)n < 0)
3159 + return n;
3160 +
3161 if (access_ok(VERIFY_WRITE, to, n))
3162 n = __copy_to_user(to, from, n);
3163 return n;
3164 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
3165 static inline unsigned long __must_check
3166 __copy_from_user(void *to, const void __user *from, unsigned long n)
3167 {
3168 + if ((long)n < 0)
3169 + return n;
3170 +
3171 if (__builtin_constant_p(n) && (n <= 256))
3172 return uaccess.copy_from_user_small(n, from, to);
3173 else
3174 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
3175 unsigned int sz = __compiletime_object_size(to);
3176
3177 might_fault();
3178 +
3179 + if ((long)n < 0)
3180 + return n;
3181 +
3182 if (unlikely(sz != -1 && sz < n)) {
3183 copy_from_user_overflow();
3184 return n;
3185 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
3186 index dfcb343..eda788a 100644
3187 --- a/arch/s390/kernel/module.c
3188 +++ b/arch/s390/kernel/module.c
3189 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
3190
3191 /* Increase core size by size of got & plt and set start
3192 offsets for got and plt. */
3193 - me->core_size = ALIGN(me->core_size, 4);
3194 - me->arch.got_offset = me->core_size;
3195 - me->core_size += me->arch.got_size;
3196 - me->arch.plt_offset = me->core_size;
3197 - me->core_size += me->arch.plt_size;
3198 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3199 + me->arch.got_offset = me->core_size_rw;
3200 + me->core_size_rw += me->arch.got_size;
3201 + me->arch.plt_offset = me->core_size_rx;
3202 + me->core_size_rx += me->arch.plt_size;
3203 return 0;
3204 }
3205
3206 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3207 if (info->got_initialized == 0) {
3208 Elf_Addr *gotent;
3209
3210 - gotent = me->module_core + me->arch.got_offset +
3211 + gotent = me->module_core_rw + me->arch.got_offset +
3212 info->got_offset;
3213 *gotent = val;
3214 info->got_initialized = 1;
3215 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3216 else if (r_type == R_390_GOTENT ||
3217 r_type == R_390_GOTPLTENT)
3218 *(unsigned int *) loc =
3219 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3220 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3221 else if (r_type == R_390_GOT64 ||
3222 r_type == R_390_GOTPLT64)
3223 *(unsigned long *) loc = val;
3224 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3225 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3226 if (info->plt_initialized == 0) {
3227 unsigned int *ip;
3228 - ip = me->module_core + me->arch.plt_offset +
3229 + ip = me->module_core_rx + me->arch.plt_offset +
3230 info->plt_offset;
3231 #ifndef CONFIG_64BIT
3232 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3233 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3234 val - loc + 0xffffUL < 0x1ffffeUL) ||
3235 (r_type == R_390_PLT32DBL &&
3236 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3237 - val = (Elf_Addr) me->module_core +
3238 + val = (Elf_Addr) me->module_core_rx +
3239 me->arch.plt_offset +
3240 info->plt_offset;
3241 val += rela->r_addend - loc;
3242 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3243 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3244 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3245 val = val + rela->r_addend -
3246 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3247 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3248 if (r_type == R_390_GOTOFF16)
3249 *(unsigned short *) loc = val;
3250 else if (r_type == R_390_GOTOFF32)
3251 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3252 break;
3253 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3254 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3255 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3256 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3257 rela->r_addend - loc;
3258 if (r_type == R_390_GOTPC)
3259 *(unsigned int *) loc = val;
3260 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3261 index 9451b21..ed8956f 100644
3262 --- a/arch/s390/kernel/process.c
3263 +++ b/arch/s390/kernel/process.c
3264 @@ -321,39 +321,3 @@ unsigned long get_wchan(struct task_struct *p)
3265 }
3266 return 0;
3267 }
3268 -
3269 -unsigned long arch_align_stack(unsigned long sp)
3270 -{
3271 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3272 - sp -= get_random_int() & ~PAGE_MASK;
3273 - return sp & ~0xf;
3274 -}
3275 -
3276 -static inline unsigned long brk_rnd(void)
3277 -{
3278 - /* 8MB for 32bit, 1GB for 64bit */
3279 - if (is_32bit_task())
3280 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3281 - else
3282 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3283 -}
3284 -
3285 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3286 -{
3287 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3288 -
3289 - if (ret < mm->brk)
3290 - return mm->brk;
3291 - return ret;
3292 -}
3293 -
3294 -unsigned long randomize_et_dyn(unsigned long base)
3295 -{
3296 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3297 -
3298 - if (!(current->flags & PF_RANDOMIZE))
3299 - return base;
3300 - if (ret < base)
3301 - return base;
3302 - return ret;
3303 -}
3304 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3305 index f09c748..cf9ec1d 100644
3306 --- a/arch/s390/mm/mmap.c
3307 +++ b/arch/s390/mm/mmap.c
3308 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3309 */
3310 if (mmap_is_legacy()) {
3311 mm->mmap_base = TASK_UNMAPPED_BASE;
3312 +
3313 +#ifdef CONFIG_PAX_RANDMMAP
3314 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3315 + mm->mmap_base += mm->delta_mmap;
3316 +#endif
3317 +
3318 mm->get_unmapped_area = arch_get_unmapped_area;
3319 mm->unmap_area = arch_unmap_area;
3320 } else {
3321 mm->mmap_base = mmap_base();
3322 +
3323 +#ifdef CONFIG_PAX_RANDMMAP
3324 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3325 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3326 +#endif
3327 +
3328 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3329 mm->unmap_area = arch_unmap_area_topdown;
3330 }
3331 @@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3332 */
3333 if (mmap_is_legacy()) {
3334 mm->mmap_base = TASK_UNMAPPED_BASE;
3335 +
3336 +#ifdef CONFIG_PAX_RANDMMAP
3337 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3338 + mm->mmap_base += mm->delta_mmap;
3339 +#endif
3340 +
3341 mm->get_unmapped_area = s390_get_unmapped_area;
3342 mm->unmap_area = arch_unmap_area;
3343 } else {
3344 mm->mmap_base = mmap_base();
3345 +
3346 +#ifdef CONFIG_PAX_RANDMMAP
3347 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3348 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3349 +#endif
3350 +
3351 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3352 mm->unmap_area = arch_unmap_area_topdown;
3353 }
3354 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3355 index 589d5c7..669e274 100644
3356 --- a/arch/score/include/asm/system.h
3357 +++ b/arch/score/include/asm/system.h
3358 @@ -17,7 +17,7 @@ do { \
3359 #define finish_arch_switch(prev) do {} while (0)
3360
3361 typedef void (*vi_handler_t)(void);
3362 -extern unsigned long arch_align_stack(unsigned long sp);
3363 +#define arch_align_stack(x) (x)
3364
3365 #define mb() barrier()
3366 #define rmb() barrier()
3367 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3368 index 25d0803..d6c8e36 100644
3369 --- a/arch/score/kernel/process.c
3370 +++ b/arch/score/kernel/process.c
3371 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3372
3373 return task_pt_regs(task)->cp0_epc;
3374 }
3375 -
3376 -unsigned long arch_align_stack(unsigned long sp)
3377 -{
3378 - return sp;
3379 -}
3380 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3381 index afeb710..d1d1289 100644
3382 --- a/arch/sh/mm/mmap.c
3383 +++ b/arch/sh/mm/mmap.c
3384 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3385 addr = PAGE_ALIGN(addr);
3386
3387 vma = find_vma(mm, addr);
3388 - if (TASK_SIZE - len >= addr &&
3389 - (!vma || addr + len <= vma->vm_start))
3390 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3391 return addr;
3392 }
3393
3394 @@ -106,7 +105,7 @@ full_search:
3395 }
3396 return -ENOMEM;
3397 }
3398 - if (likely(!vma || addr + len <= vma->vm_start)) {
3399 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3400 /*
3401 * Remember the place where we stopped the search:
3402 */
3403 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3404 addr = PAGE_ALIGN(addr);
3405
3406 vma = find_vma(mm, addr);
3407 - if (TASK_SIZE - len >= addr &&
3408 - (!vma || addr + len <= vma->vm_start))
3409 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3410 return addr;
3411 }
3412
3413 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3414 /* make sure it can fit in the remaining address space */
3415 if (likely(addr > len)) {
3416 vma = find_vma(mm, addr-len);
3417 - if (!vma || addr <= vma->vm_start) {
3418 + if (check_heap_stack_gap(vma, addr - len, len)) {
3419 /* remember the address as a hint for next time */
3420 return (mm->free_area_cache = addr-len);
3421 }
3422 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3423 if (unlikely(mm->mmap_base < len))
3424 goto bottomup;
3425
3426 - addr = mm->mmap_base-len;
3427 - if (do_colour_align)
3428 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3429 + addr = mm->mmap_base - len;
3430
3431 do {
3432 + if (do_colour_align)
3433 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3434 /*
3435 * Lookup failure means no vma is above this address,
3436 * else if new region fits below vma->vm_start,
3437 * return with success:
3438 */
3439 vma = find_vma(mm, addr);
3440 - if (likely(!vma || addr+len <= vma->vm_start)) {
3441 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3442 /* remember the address as a hint for next time */
3443 return (mm->free_area_cache = addr);
3444 }
3445 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3446 mm->cached_hole_size = vma->vm_start - addr;
3447
3448 /* try just below the current vma->vm_start */
3449 - addr = vma->vm_start-len;
3450 - if (do_colour_align)
3451 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3452 - } while (likely(len < vma->vm_start));
3453 + addr = skip_heap_stack_gap(vma, len);
3454 + } while (!IS_ERR_VALUE(addr));
3455
3456 bottomup:
3457 /*
3458 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
3459 index f92602e..27060b2 100644
3460 --- a/arch/sparc/Kconfig
3461 +++ b/arch/sparc/Kconfig
3462 @@ -31,6 +31,7 @@ config SPARC
3463
3464 config SPARC32
3465 def_bool !64BIT
3466 + select GENERIC_ATOMIC64
3467
3468 config SPARC64
3469 def_bool 64BIT
3470 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3471 index ad1fb5d..fc5315b 100644
3472 --- a/arch/sparc/Makefile
3473 +++ b/arch/sparc/Makefile
3474 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3475 # Export what is needed by arch/sparc/boot/Makefile
3476 export VMLINUX_INIT VMLINUX_MAIN
3477 VMLINUX_INIT := $(head-y) $(init-y)
3478 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3479 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3480 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3481 VMLINUX_MAIN += $(drivers-y) $(net-y)
3482
3483 diff --git a/arch/sparc/include/asm/atomic.h b/arch/sparc/include/asm/atomic.h
3484 index 8ff83d8..4a459c2 100644
3485 --- a/arch/sparc/include/asm/atomic.h
3486 +++ b/arch/sparc/include/asm/atomic.h
3487 @@ -4,5 +4,6 @@
3488 #include <asm/atomic_64.h>
3489 #else
3490 #include <asm/atomic_32.h>
3491 +#include <asm-generic/atomic64.h>
3492 #endif
3493 #endif
3494 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3495 index 9f421df..b81fc12 100644
3496 --- a/arch/sparc/include/asm/atomic_64.h
3497 +++ b/arch/sparc/include/asm/atomic_64.h
3498 @@ -14,18 +14,40 @@
3499 #define ATOMIC64_INIT(i) { (i) }
3500
3501 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3502 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3503 +{
3504 + return v->counter;
3505 +}
3506 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3507 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3508 +{
3509 + return v->counter;
3510 +}
3511
3512 #define atomic_set(v, i) (((v)->counter) = i)
3513 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3514 +{
3515 + v->counter = i;
3516 +}
3517 #define atomic64_set(v, i) (((v)->counter) = i)
3518 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3519 +{
3520 + v->counter = i;
3521 +}
3522
3523 extern void atomic_add(int, atomic_t *);
3524 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3525 extern void atomic64_add(long, atomic64_t *);
3526 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3527 extern void atomic_sub(int, atomic_t *);
3528 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3529 extern void atomic64_sub(long, atomic64_t *);
3530 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3531
3532 extern int atomic_add_ret(int, atomic_t *);
3533 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3534 extern long atomic64_add_ret(long, atomic64_t *);
3535 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3536 extern int atomic_sub_ret(int, atomic_t *);
3537 extern long atomic64_sub_ret(long, atomic64_t *);
3538
3539 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3540 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3541
3542 #define atomic_inc_return(v) atomic_add_ret(1, v)
3543 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3544 +{
3545 + return atomic_add_ret_unchecked(1, v);
3546 +}
3547 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3548 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3549 +{
3550 + return atomic64_add_ret_unchecked(1, v);
3551 +}
3552
3553 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3554 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3555
3556 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3557 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3558 +{
3559 + return atomic_add_ret_unchecked(i, v);
3560 +}
3561 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3562 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3563 +{
3564 + return atomic64_add_ret_unchecked(i, v);
3565 +}
3566
3567 /*
3568 * atomic_inc_and_test - increment and test
3569 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3570 * other cases.
3571 */
3572 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3573 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3574 +{
3575 + return atomic_inc_return_unchecked(v) == 0;
3576 +}
3577 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3578
3579 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3580 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3581 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3582
3583 #define atomic_inc(v) atomic_add(1, v)
3584 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3585 +{
3586 + atomic_add_unchecked(1, v);
3587 +}
3588 #define atomic64_inc(v) atomic64_add(1, v)
3589 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3590 +{
3591 + atomic64_add_unchecked(1, v);
3592 +}
3593
3594 #define atomic_dec(v) atomic_sub(1, v)
3595 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3596 +{
3597 + atomic_sub_unchecked(1, v);
3598 +}
3599 #define atomic64_dec(v) atomic64_sub(1, v)
3600 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3601 +{
3602 + atomic64_sub_unchecked(1, v);
3603 +}
3604
3605 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3606 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3607
3608 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3609 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3610 +{
3611 + return cmpxchg(&v->counter, old, new);
3612 +}
3613 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3614 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3615 +{
3616 + return xchg(&v->counter, new);
3617 +}
3618
3619 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3620 {
3621 - int c, old;
3622 + int c, old, new;
3623 c = atomic_read(v);
3624 for (;;) {
3625 - if (unlikely(c == (u)))
3626 + if (unlikely(c == u))
3627 break;
3628 - old = atomic_cmpxchg((v), c, c + (a));
3629 +
3630 + asm volatile("addcc %2, %0, %0\n"
3631 +
3632 +#ifdef CONFIG_PAX_REFCOUNT
3633 + "tvs %%icc, 6\n"
3634 +#endif
3635 +
3636 + : "=r" (new)
3637 + : "0" (c), "ir" (a)
3638 + : "cc");
3639 +
3640 + old = atomic_cmpxchg(v, c, new);
3641 if (likely(old == c))
3642 break;
3643 c = old;
3644 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3645 #define atomic64_cmpxchg(v, o, n) \
3646 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3647 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3648 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3649 +{
3650 + return xchg(&v->counter, new);
3651 +}
3652
3653 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3654 {
3655 - long c, old;
3656 + long c, old, new;
3657 c = atomic64_read(v);
3658 for (;;) {
3659 - if (unlikely(c == (u)))
3660 + if (unlikely(c == u))
3661 break;
3662 - old = atomic64_cmpxchg((v), c, c + (a));
3663 +
3664 + asm volatile("addcc %2, %0, %0\n"
3665 +
3666 +#ifdef CONFIG_PAX_REFCOUNT
3667 + "tvs %%xcc, 6\n"
3668 +#endif
3669 +
3670 + : "=r" (new)
3671 + : "0" (c), "ir" (a)
3672 + : "cc");
3673 +
3674 + old = atomic64_cmpxchg(v, c, new);
3675 if (likely(old == c))
3676 break;
3677 c = old;
3678 }
3679 - return c != (u);
3680 + return c != u;
3681 }
3682
3683 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3684 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3685 index 69358b5..17b4745 100644
3686 --- a/arch/sparc/include/asm/cache.h
3687 +++ b/arch/sparc/include/asm/cache.h
3688 @@ -10,7 +10,7 @@
3689 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3690
3691 #define L1_CACHE_SHIFT 5
3692 -#define L1_CACHE_BYTES 32
3693 +#define L1_CACHE_BYTES 32UL
3694
3695 #ifdef CONFIG_SPARC32
3696 #define SMP_CACHE_BYTES_SHIFT 5
3697 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3698 index 4269ca6..e3da77f 100644
3699 --- a/arch/sparc/include/asm/elf_32.h
3700 +++ b/arch/sparc/include/asm/elf_32.h
3701 @@ -114,6 +114,13 @@ typedef struct {
3702
3703 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3704
3705 +#ifdef CONFIG_PAX_ASLR
3706 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3707 +
3708 +#define PAX_DELTA_MMAP_LEN 16
3709 +#define PAX_DELTA_STACK_LEN 16
3710 +#endif
3711 +
3712 /* This yields a mask that user programs can use to figure out what
3713 instruction set this cpu supports. This can NOT be done in userspace
3714 on Sparc. */
3715 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3716 index 7df8b7f..4946269 100644
3717 --- a/arch/sparc/include/asm/elf_64.h
3718 +++ b/arch/sparc/include/asm/elf_64.h
3719 @@ -180,6 +180,13 @@ typedef struct {
3720 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3721 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3722
3723 +#ifdef CONFIG_PAX_ASLR
3724 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3725 +
3726 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3727 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3728 +#endif
3729 +
3730 extern unsigned long sparc64_elf_hwcap;
3731 #define ELF_HWCAP sparc64_elf_hwcap
3732
3733 diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
3734 index 156707b..aefa786 100644
3735 --- a/arch/sparc/include/asm/page_32.h
3736 +++ b/arch/sparc/include/asm/page_32.h
3737 @@ -8,6 +8,8 @@
3738 #ifndef _SPARC_PAGE_H
3739 #define _SPARC_PAGE_H
3740
3741 +#include <linux/const.h>
3742 +
3743 #define PAGE_SHIFT 12
3744
3745 #ifndef __ASSEMBLY__
3746 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3747 index a790cc6..091ed94 100644
3748 --- a/arch/sparc/include/asm/pgtable_32.h
3749 +++ b/arch/sparc/include/asm/pgtable_32.h
3750 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3751 BTFIXUPDEF_INT(page_none)
3752 BTFIXUPDEF_INT(page_copy)
3753 BTFIXUPDEF_INT(page_readonly)
3754 +
3755 +#ifdef CONFIG_PAX_PAGEEXEC
3756 +BTFIXUPDEF_INT(page_shared_noexec)
3757 +BTFIXUPDEF_INT(page_copy_noexec)
3758 +BTFIXUPDEF_INT(page_readonly_noexec)
3759 +#endif
3760 +
3761 BTFIXUPDEF_INT(page_kernel)
3762
3763 #define PMD_SHIFT SUN4C_PMD_SHIFT
3764 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3765 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3766 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3767
3768 +#ifdef CONFIG_PAX_PAGEEXEC
3769 +extern pgprot_t PAGE_SHARED_NOEXEC;
3770 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3771 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3772 +#else
3773 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3774 +# define PAGE_COPY_NOEXEC PAGE_COPY
3775 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3776 +#endif
3777 +
3778 extern unsigned long page_kernel;
3779
3780 #ifdef MODULE
3781 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3782 index f6ae2b2..b03ffc7 100644
3783 --- a/arch/sparc/include/asm/pgtsrmmu.h
3784 +++ b/arch/sparc/include/asm/pgtsrmmu.h
3785 @@ -115,6 +115,13 @@
3786 SRMMU_EXEC | SRMMU_REF)
3787 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3788 SRMMU_EXEC | SRMMU_REF)
3789 +
3790 +#ifdef CONFIG_PAX_PAGEEXEC
3791 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3792 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3793 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3794 +#endif
3795 +
3796 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3797 SRMMU_DIRTY | SRMMU_REF)
3798
3799 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3800 index 9689176..63c18ea 100644
3801 --- a/arch/sparc/include/asm/spinlock_64.h
3802 +++ b/arch/sparc/include/asm/spinlock_64.h
3803 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3804
3805 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3806
3807 -static void inline arch_read_lock(arch_rwlock_t *lock)
3808 +static inline void arch_read_lock(arch_rwlock_t *lock)
3809 {
3810 unsigned long tmp1, tmp2;
3811
3812 __asm__ __volatile__ (
3813 "1: ldsw [%2], %0\n"
3814 " brlz,pn %0, 2f\n"
3815 -"4: add %0, 1, %1\n"
3816 +"4: addcc %0, 1, %1\n"
3817 +
3818 +#ifdef CONFIG_PAX_REFCOUNT
3819 +" tvs %%icc, 6\n"
3820 +#endif
3821 +
3822 " cas [%2], %0, %1\n"
3823 " cmp %0, %1\n"
3824 " bne,pn %%icc, 1b\n"
3825 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3826 " .previous"
3827 : "=&r" (tmp1), "=&r" (tmp2)
3828 : "r" (lock)
3829 - : "memory");
3830 + : "memory", "cc");
3831 }
3832
3833 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3834 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3835 {
3836 int tmp1, tmp2;
3837
3838 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3839 "1: ldsw [%2], %0\n"
3840 " brlz,a,pn %0, 2f\n"
3841 " mov 0, %0\n"
3842 -" add %0, 1, %1\n"
3843 +" addcc %0, 1, %1\n"
3844 +
3845 +#ifdef CONFIG_PAX_REFCOUNT
3846 +" tvs %%icc, 6\n"
3847 +#endif
3848 +
3849 " cas [%2], %0, %1\n"
3850 " cmp %0, %1\n"
3851 " bne,pn %%icc, 1b\n"
3852 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3853 return tmp1;
3854 }
3855
3856 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3857 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3858 {
3859 unsigned long tmp1, tmp2;
3860
3861 __asm__ __volatile__(
3862 "1: lduw [%2], %0\n"
3863 -" sub %0, 1, %1\n"
3864 +" subcc %0, 1, %1\n"
3865 +
3866 +#ifdef CONFIG_PAX_REFCOUNT
3867 +" tvs %%icc, 6\n"
3868 +#endif
3869 +
3870 " cas [%2], %0, %1\n"
3871 " cmp %0, %1\n"
3872 " bne,pn %%xcc, 1b\n"
3873 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3874 : "memory");
3875 }
3876
3877 -static void inline arch_write_lock(arch_rwlock_t *lock)
3878 +static inline void arch_write_lock(arch_rwlock_t *lock)
3879 {
3880 unsigned long mask, tmp1, tmp2;
3881
3882 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3883 : "memory");
3884 }
3885
3886 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3887 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3888 {
3889 __asm__ __volatile__(
3890 " stw %%g0, [%0]"
3891 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3892 : "memory");
3893 }
3894
3895 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3896 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3897 {
3898 unsigned long mask, tmp1, tmp2, result;
3899
3900 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3901 index fa57532..e1a4c53 100644
3902 --- a/arch/sparc/include/asm/thread_info_32.h
3903 +++ b/arch/sparc/include/asm/thread_info_32.h
3904 @@ -50,6 +50,8 @@ struct thread_info {
3905 unsigned long w_saved;
3906
3907 struct restart_block restart_block;
3908 +
3909 + unsigned long lowest_stack;
3910 };
3911
3912 /*
3913 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3914 index 60d86be..952dea1 100644
3915 --- a/arch/sparc/include/asm/thread_info_64.h
3916 +++ b/arch/sparc/include/asm/thread_info_64.h
3917 @@ -63,6 +63,8 @@ struct thread_info {
3918 struct pt_regs *kern_una_regs;
3919 unsigned int kern_una_insn;
3920
3921 + unsigned long lowest_stack;
3922 +
3923 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3924 };
3925
3926 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3927 index e88fbe5..96b0ce5 100644
3928 --- a/arch/sparc/include/asm/uaccess.h
3929 +++ b/arch/sparc/include/asm/uaccess.h
3930 @@ -1,5 +1,13 @@
3931 #ifndef ___ASM_SPARC_UACCESS_H
3932 #define ___ASM_SPARC_UACCESS_H
3933 +
3934 +#ifdef __KERNEL__
3935 +#ifndef __ASSEMBLY__
3936 +#include <linux/types.h>
3937 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3938 +#endif
3939 +#endif
3940 +
3941 #if defined(__sparc__) && defined(__arch64__)
3942 #include <asm/uaccess_64.h>
3943 #else
3944 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3945 index 8303ac4..07f333d 100644
3946 --- a/arch/sparc/include/asm/uaccess_32.h
3947 +++ b/arch/sparc/include/asm/uaccess_32.h
3948 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3949
3950 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3951 {
3952 - if (n && __access_ok((unsigned long) to, n))
3953 + if ((long)n < 0)
3954 + return n;
3955 +
3956 + if (n && __access_ok((unsigned long) to, n)) {
3957 + if (!__builtin_constant_p(n))
3958 + check_object_size(from, n, true);
3959 return __copy_user(to, (__force void __user *) from, n);
3960 - else
3961 + } else
3962 return n;
3963 }
3964
3965 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3966 {
3967 + if ((long)n < 0)
3968 + return n;
3969 +
3970 + if (!__builtin_constant_p(n))
3971 + check_object_size(from, n, true);
3972 +
3973 return __copy_user(to, (__force void __user *) from, n);
3974 }
3975
3976 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3977 {
3978 - if (n && __access_ok((unsigned long) from, n))
3979 + if ((long)n < 0)
3980 + return n;
3981 +
3982 + if (n && __access_ok((unsigned long) from, n)) {
3983 + if (!__builtin_constant_p(n))
3984 + check_object_size(to, n, false);
3985 return __copy_user((__force void __user *) to, from, n);
3986 - else
3987 + } else
3988 return n;
3989 }
3990
3991 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3992 {
3993 + if ((long)n < 0)
3994 + return n;
3995 +
3996 return __copy_user((__force void __user *) to, from, n);
3997 }
3998
3999 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
4000 index 3e1449f..5293a0e 100644
4001 --- a/arch/sparc/include/asm/uaccess_64.h
4002 +++ b/arch/sparc/include/asm/uaccess_64.h
4003 @@ -10,6 +10,7 @@
4004 #include <linux/compiler.h>
4005 #include <linux/string.h>
4006 #include <linux/thread_info.h>
4007 +#include <linux/kernel.h>
4008 #include <asm/asi.h>
4009 #include <asm/system.h>
4010 #include <asm/spitfire.h>
4011 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
4012 static inline unsigned long __must_check
4013 copy_from_user(void *to, const void __user *from, unsigned long size)
4014 {
4015 - unsigned long ret = ___copy_from_user(to, from, size);
4016 + unsigned long ret;
4017
4018 + if ((long)size < 0 || size > INT_MAX)
4019 + return size;
4020 +
4021 + if (!__builtin_constant_p(size))
4022 + check_object_size(to, size, false);
4023 +
4024 + ret = ___copy_from_user(to, from, size);
4025 if (unlikely(ret))
4026 ret = copy_from_user_fixup(to, from, size);
4027
4028 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
4029 static inline unsigned long __must_check
4030 copy_to_user(void __user *to, const void *from, unsigned long size)
4031 {
4032 - unsigned long ret = ___copy_to_user(to, from, size);
4033 + unsigned long ret;
4034
4035 + if ((long)size < 0 || size > INT_MAX)
4036 + return size;
4037 +
4038 + if (!__builtin_constant_p(size))
4039 + check_object_size(from, size, true);
4040 +
4041 + ret = ___copy_to_user(to, from, size);
4042 if (unlikely(ret))
4043 ret = copy_to_user_fixup(to, from, size);
4044 return ret;
4045 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
4046 index cb85458..e063f17 100644
4047 --- a/arch/sparc/kernel/Makefile
4048 +++ b/arch/sparc/kernel/Makefile
4049 @@ -3,7 +3,7 @@
4050 #
4051
4052 asflags-y := -ansi
4053 -ccflags-y := -Werror
4054 +#ccflags-y := -Werror
4055
4056 extra-y := head_$(BITS).o
4057 extra-y += init_task.o
4058 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
4059 index f793742..4d880af 100644
4060 --- a/arch/sparc/kernel/process_32.c
4061 +++ b/arch/sparc/kernel/process_32.c
4062 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
4063 rw->ins[4], rw->ins[5],
4064 rw->ins[6],
4065 rw->ins[7]);
4066 - printk("%pS\n", (void *) rw->ins[7]);
4067 + printk("%pA\n", (void *) rw->ins[7]);
4068 rw = (struct reg_window32 *) rw->ins[6];
4069 }
4070 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4071 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
4072
4073 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4074 r->psr, r->pc, r->npc, r->y, print_tainted());
4075 - printk("PC: <%pS>\n", (void *) r->pc);
4076 + printk("PC: <%pA>\n", (void *) r->pc);
4077 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4078 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4079 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4080 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4081 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4082 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4083 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4084 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4085
4086 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4087 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4088 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4089 rw = (struct reg_window32 *) fp;
4090 pc = rw->ins[7];
4091 printk("[%08lx : ", pc);
4092 - printk("%pS ] ", (void *) pc);
4093 + printk("%pA ] ", (void *) pc);
4094 fp = rw->ins[6];
4095 } while (++count < 16);
4096 printk("\n");
4097 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
4098 index 3739a06..48b2ff0 100644
4099 --- a/arch/sparc/kernel/process_64.c
4100 +++ b/arch/sparc/kernel/process_64.c
4101 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
4102 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4103 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4104 if (regs->tstate & TSTATE_PRIV)
4105 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4106 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4107 }
4108
4109 void show_regs(struct pt_regs *regs)
4110 {
4111 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4112 regs->tpc, regs->tnpc, regs->y, print_tainted());
4113 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4114 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4115 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4116 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4117 regs->u_regs[3]);
4118 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4119 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4120 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4121 regs->u_regs[15]);
4122 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4123 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4124 show_regwindow(regs);
4125 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
4126 }
4127 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
4128 ((tp && tp->task) ? tp->task->pid : -1));
4129
4130 if (gp->tstate & TSTATE_PRIV) {
4131 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4132 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4133 (void *) gp->tpc,
4134 (void *) gp->o7,
4135 (void *) gp->i7,
4136 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
4137 index 42b282f..28ce9f2 100644
4138 --- a/arch/sparc/kernel/sys_sparc_32.c
4139 +++ b/arch/sparc/kernel/sys_sparc_32.c
4140 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4141 if (ARCH_SUN4C && len > 0x20000000)
4142 return -ENOMEM;
4143 if (!addr)
4144 - addr = TASK_UNMAPPED_BASE;
4145 + addr = current->mm->mmap_base;
4146
4147 if (flags & MAP_SHARED)
4148 addr = COLOUR_ALIGN(addr);
4149 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4150 }
4151 if (TASK_SIZE - PAGE_SIZE - len < addr)
4152 return -ENOMEM;
4153 - if (!vmm || addr + len <= vmm->vm_start)
4154 + if (check_heap_stack_gap(vmm, addr, len))
4155 return addr;
4156 addr = vmm->vm_end;
4157 if (flags & MAP_SHARED)
4158 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
4159 index 441521a..b767073 100644
4160 --- a/arch/sparc/kernel/sys_sparc_64.c
4161 +++ b/arch/sparc/kernel/sys_sparc_64.c
4162 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4163 /* We do not accept a shared mapping if it would violate
4164 * cache aliasing constraints.
4165 */
4166 - if ((flags & MAP_SHARED) &&
4167 + if ((filp || (flags & MAP_SHARED)) &&
4168 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4169 return -EINVAL;
4170 return addr;
4171 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4172 if (filp || (flags & MAP_SHARED))
4173 do_color_align = 1;
4174
4175 +#ifdef CONFIG_PAX_RANDMMAP
4176 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4177 +#endif
4178 +
4179 if (addr) {
4180 if (do_color_align)
4181 addr = COLOUR_ALIGN(addr, pgoff);
4182 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4183 addr = PAGE_ALIGN(addr);
4184
4185 vma = find_vma(mm, addr);
4186 - if (task_size - len >= addr &&
4187 - (!vma || addr + len <= vma->vm_start))
4188 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4189 return addr;
4190 }
4191
4192 if (len > mm->cached_hole_size) {
4193 - start_addr = addr = mm->free_area_cache;
4194 + start_addr = addr = mm->free_area_cache;
4195 } else {
4196 - start_addr = addr = TASK_UNMAPPED_BASE;
4197 + start_addr = addr = mm->mmap_base;
4198 mm->cached_hole_size = 0;
4199 }
4200
4201 @@ -174,14 +177,14 @@ full_search:
4202 vma = find_vma(mm, VA_EXCLUDE_END);
4203 }
4204 if (unlikely(task_size < addr)) {
4205 - if (start_addr != TASK_UNMAPPED_BASE) {
4206 - start_addr = addr = TASK_UNMAPPED_BASE;
4207 + if (start_addr != mm->mmap_base) {
4208 + start_addr = addr = mm->mmap_base;
4209 mm->cached_hole_size = 0;
4210 goto full_search;
4211 }
4212 return -ENOMEM;
4213 }
4214 - if (likely(!vma || addr + len <= vma->vm_start)) {
4215 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4216 /*
4217 * Remember the place where we stopped the search:
4218 */
4219 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4220 /* We do not accept a shared mapping if it would violate
4221 * cache aliasing constraints.
4222 */
4223 - if ((flags & MAP_SHARED) &&
4224 + if ((filp || (flags & MAP_SHARED)) &&
4225 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4226 return -EINVAL;
4227 return addr;
4228 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4229 addr = PAGE_ALIGN(addr);
4230
4231 vma = find_vma(mm, addr);
4232 - if (task_size - len >= addr &&
4233 - (!vma || addr + len <= vma->vm_start))
4234 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4235 return addr;
4236 }
4237
4238 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4239 /* make sure it can fit in the remaining address space */
4240 if (likely(addr > len)) {
4241 vma = find_vma(mm, addr-len);
4242 - if (!vma || addr <= vma->vm_start) {
4243 + if (check_heap_stack_gap(vma, addr - len, len)) {
4244 /* remember the address as a hint for next time */
4245 return (mm->free_area_cache = addr-len);
4246 }
4247 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4248 if (unlikely(mm->mmap_base < len))
4249 goto bottomup;
4250
4251 - addr = mm->mmap_base-len;
4252 - if (do_color_align)
4253 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4254 + addr = mm->mmap_base - len;
4255
4256 do {
4257 + if (do_color_align)
4258 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4259 /*
4260 * Lookup failure means no vma is above this address,
4261 * else if new region fits below vma->vm_start,
4262 * return with success:
4263 */
4264 vma = find_vma(mm, addr);
4265 - if (likely(!vma || addr+len <= vma->vm_start)) {
4266 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4267 /* remember the address as a hint for next time */
4268 return (mm->free_area_cache = addr);
4269 }
4270 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4271 mm->cached_hole_size = vma->vm_start - addr;
4272
4273 /* try just below the current vma->vm_start */
4274 - addr = vma->vm_start-len;
4275 - if (do_color_align)
4276 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4277 - } while (likely(len < vma->vm_start));
4278 + addr = skip_heap_stack_gap(vma, len);
4279 + } while (!IS_ERR_VALUE(addr));
4280
4281 bottomup:
4282 /*
4283 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4284 gap == RLIM_INFINITY ||
4285 sysctl_legacy_va_layout) {
4286 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4287 +
4288 +#ifdef CONFIG_PAX_RANDMMAP
4289 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4290 + mm->mmap_base += mm->delta_mmap;
4291 +#endif
4292 +
4293 mm->get_unmapped_area = arch_get_unmapped_area;
4294 mm->unmap_area = arch_unmap_area;
4295 } else {
4296 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4297 gap = (task_size / 6 * 5);
4298
4299 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4300 +
4301 +#ifdef CONFIG_PAX_RANDMMAP
4302 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4303 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4304 +#endif
4305 +
4306 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4307 mm->unmap_area = arch_unmap_area_topdown;
4308 }
4309 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4310 index 591f20c..0f1b925 100644
4311 --- a/arch/sparc/kernel/traps_32.c
4312 +++ b/arch/sparc/kernel/traps_32.c
4313 @@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
4314 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4315 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4316
4317 +extern void gr_handle_kernel_exploit(void);
4318 +
4319 void die_if_kernel(char *str, struct pt_regs *regs)
4320 {
4321 static int die_counter;
4322 @@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4323 count++ < 30 &&
4324 (((unsigned long) rw) >= PAGE_OFFSET) &&
4325 !(((unsigned long) rw) & 0x7)) {
4326 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4327 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4328 (void *) rw->ins[7]);
4329 rw = (struct reg_window32 *)rw->ins[6];
4330 }
4331 }
4332 printk("Instruction DUMP:");
4333 instruction_dump ((unsigned long *) regs->pc);
4334 - if(regs->psr & PSR_PS)
4335 + if(regs->psr & PSR_PS) {
4336 + gr_handle_kernel_exploit();
4337 do_exit(SIGKILL);
4338 + }
4339 do_exit(SIGSEGV);
4340 }
4341
4342 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4343 index 0cbdaa4..438e4c9 100644
4344 --- a/arch/sparc/kernel/traps_64.c
4345 +++ b/arch/sparc/kernel/traps_64.c
4346 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4347 i + 1,
4348 p->trapstack[i].tstate, p->trapstack[i].tpc,
4349 p->trapstack[i].tnpc, p->trapstack[i].tt);
4350 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4351 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4352 }
4353 }
4354
4355 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4356
4357 lvl -= 0x100;
4358 if (regs->tstate & TSTATE_PRIV) {
4359 +
4360 +#ifdef CONFIG_PAX_REFCOUNT
4361 + if (lvl == 6)
4362 + pax_report_refcount_overflow(regs);
4363 +#endif
4364 +
4365 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4366 die_if_kernel(buffer, regs);
4367 }
4368 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4369 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4370 {
4371 char buffer[32];
4372 -
4373 +
4374 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4375 0, lvl, SIGTRAP) == NOTIFY_STOP)
4376 return;
4377
4378 +#ifdef CONFIG_PAX_REFCOUNT
4379 + if (lvl == 6)
4380 + pax_report_refcount_overflow(regs);
4381 +#endif
4382 +
4383 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4384
4385 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4386 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4387 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4388 printk("%s" "ERROR(%d): ",
4389 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4390 - printk("TPC<%pS>\n", (void *) regs->tpc);
4391 + printk("TPC<%pA>\n", (void *) regs->tpc);
4392 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4393 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4394 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4395 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4396 smp_processor_id(),
4397 (type & 0x1) ? 'I' : 'D',
4398 regs->tpc);
4399 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4400 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4401 panic("Irrecoverable Cheetah+ parity error.");
4402 }
4403
4404 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4405 smp_processor_id(),
4406 (type & 0x1) ? 'I' : 'D',
4407 regs->tpc);
4408 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4409 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4410 }
4411
4412 struct sun4v_error_entry {
4413 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4414
4415 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4416 regs->tpc, tl);
4417 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4418 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4419 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4420 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4421 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4422 (void *) regs->u_regs[UREG_I7]);
4423 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4424 "pte[%lx] error[%lx]\n",
4425 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4426
4427 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4428 regs->tpc, tl);
4429 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4430 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4431 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4432 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4433 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4434 (void *) regs->u_regs[UREG_I7]);
4435 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4436 "pte[%lx] error[%lx]\n",
4437 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4438 fp = (unsigned long)sf->fp + STACK_BIAS;
4439 }
4440
4441 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4442 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4443 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4444 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4445 int index = tsk->curr_ret_stack;
4446 if (tsk->ret_stack && index >= graph) {
4447 pc = tsk->ret_stack[index - graph].ret;
4448 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4449 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4450 graph++;
4451 }
4452 }
4453 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4454 return (struct reg_window *) (fp + STACK_BIAS);
4455 }
4456
4457 +extern void gr_handle_kernel_exploit(void);
4458 +
4459 void die_if_kernel(char *str, struct pt_regs *regs)
4460 {
4461 static int die_counter;
4462 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4463 while (rw &&
4464 count++ < 30 &&
4465 kstack_valid(tp, (unsigned long) rw)) {
4466 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4467 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4468 (void *) rw->ins[7]);
4469
4470 rw = kernel_stack_up(rw);
4471 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4472 }
4473 user_instruction_dump ((unsigned int __user *) regs->tpc);
4474 }
4475 - if (regs->tstate & TSTATE_PRIV)
4476 + if (regs->tstate & TSTATE_PRIV) {
4477 + gr_handle_kernel_exploit();
4478 do_exit(SIGKILL);
4479 + }
4480 do_exit(SIGSEGV);
4481 }
4482 EXPORT_SYMBOL(die_if_kernel);
4483 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4484 index 76e4ac1..78f8bb1 100644
4485 --- a/arch/sparc/kernel/unaligned_64.c
4486 +++ b/arch/sparc/kernel/unaligned_64.c
4487 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4488 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4489
4490 if (__ratelimit(&ratelimit)) {
4491 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4492 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4493 regs->tpc, (void *) regs->tpc);
4494 }
4495 }
4496 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4497 index a3fc437..fea9957 100644
4498 --- a/arch/sparc/lib/Makefile
4499 +++ b/arch/sparc/lib/Makefile
4500 @@ -2,7 +2,7 @@
4501 #
4502
4503 asflags-y := -ansi -DST_DIV0=0x02
4504 -ccflags-y := -Werror
4505 +#ccflags-y := -Werror
4506
4507 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4508 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4509 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4510 index 59186e0..f747d7a 100644
4511 --- a/arch/sparc/lib/atomic_64.S
4512 +++ b/arch/sparc/lib/atomic_64.S
4513 @@ -18,7 +18,12 @@
4514 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4515 BACKOFF_SETUP(%o2)
4516 1: lduw [%o1], %g1
4517 - add %g1, %o0, %g7
4518 + addcc %g1, %o0, %g7
4519 +
4520 +#ifdef CONFIG_PAX_REFCOUNT
4521 + tvs %icc, 6
4522 +#endif
4523 +
4524 cas [%o1], %g1, %g7
4525 cmp %g1, %g7
4526 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4527 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4528 2: BACKOFF_SPIN(%o2, %o3, 1b)
4529 .size atomic_add, .-atomic_add
4530
4531 + .globl atomic_add_unchecked
4532 + .type atomic_add_unchecked,#function
4533 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4534 + BACKOFF_SETUP(%o2)
4535 +1: lduw [%o1], %g1
4536 + add %g1, %o0, %g7
4537 + cas [%o1], %g1, %g7
4538 + cmp %g1, %g7
4539 + bne,pn %icc, 2f
4540 + nop
4541 + retl
4542 + nop
4543 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4544 + .size atomic_add_unchecked, .-atomic_add_unchecked
4545 +
4546 .globl atomic_sub
4547 .type atomic_sub,#function
4548 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4549 BACKOFF_SETUP(%o2)
4550 1: lduw [%o1], %g1
4551 - sub %g1, %o0, %g7
4552 + subcc %g1, %o0, %g7
4553 +
4554 +#ifdef CONFIG_PAX_REFCOUNT
4555 + tvs %icc, 6
4556 +#endif
4557 +
4558 cas [%o1], %g1, %g7
4559 cmp %g1, %g7
4560 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4561 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4562 2: BACKOFF_SPIN(%o2, %o3, 1b)
4563 .size atomic_sub, .-atomic_sub
4564
4565 + .globl atomic_sub_unchecked
4566 + .type atomic_sub_unchecked,#function
4567 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4568 + BACKOFF_SETUP(%o2)
4569 +1: lduw [%o1], %g1
4570 + sub %g1, %o0, %g7
4571 + cas [%o1], %g1, %g7
4572 + cmp %g1, %g7
4573 + bne,pn %icc, 2f
4574 + nop
4575 + retl
4576 + nop
4577 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4578 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4579 +
4580 .globl atomic_add_ret
4581 .type atomic_add_ret,#function
4582 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4583 BACKOFF_SETUP(%o2)
4584 1: lduw [%o1], %g1
4585 - add %g1, %o0, %g7
4586 + addcc %g1, %o0, %g7
4587 +
4588 +#ifdef CONFIG_PAX_REFCOUNT
4589 + tvs %icc, 6
4590 +#endif
4591 +
4592 cas [%o1], %g1, %g7
4593 cmp %g1, %g7
4594 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4595 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4596 2: BACKOFF_SPIN(%o2, %o3, 1b)
4597 .size atomic_add_ret, .-atomic_add_ret
4598
4599 + .globl atomic_add_ret_unchecked
4600 + .type atomic_add_ret_unchecked,#function
4601 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4602 + BACKOFF_SETUP(%o2)
4603 +1: lduw [%o1], %g1
4604 + addcc %g1, %o0, %g7
4605 + cas [%o1], %g1, %g7
4606 + cmp %g1, %g7
4607 + bne,pn %icc, 2f
4608 + add %g7, %o0, %g7
4609 + sra %g7, 0, %o0
4610 + retl
4611 + nop
4612 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4613 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4614 +
4615 .globl atomic_sub_ret
4616 .type atomic_sub_ret,#function
4617 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4618 BACKOFF_SETUP(%o2)
4619 1: lduw [%o1], %g1
4620 - sub %g1, %o0, %g7
4621 + subcc %g1, %o0, %g7
4622 +
4623 +#ifdef CONFIG_PAX_REFCOUNT
4624 + tvs %icc, 6
4625 +#endif
4626 +
4627 cas [%o1], %g1, %g7
4628 cmp %g1, %g7
4629 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4630 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4631 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4632 BACKOFF_SETUP(%o2)
4633 1: ldx [%o1], %g1
4634 - add %g1, %o0, %g7
4635 + addcc %g1, %o0, %g7
4636 +
4637 +#ifdef CONFIG_PAX_REFCOUNT
4638 + tvs %xcc, 6
4639 +#endif
4640 +
4641 casx [%o1], %g1, %g7
4642 cmp %g1, %g7
4643 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4644 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4645 2: BACKOFF_SPIN(%o2, %o3, 1b)
4646 .size atomic64_add, .-atomic64_add
4647
4648 + .globl atomic64_add_unchecked
4649 + .type atomic64_add_unchecked,#function
4650 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4651 + BACKOFF_SETUP(%o2)
4652 +1: ldx [%o1], %g1
4653 + addcc %g1, %o0, %g7
4654 + casx [%o1], %g1, %g7
4655 + cmp %g1, %g7
4656 + bne,pn %xcc, 2f
4657 + nop
4658 + retl
4659 + nop
4660 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4661 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4662 +
4663 .globl atomic64_sub
4664 .type atomic64_sub,#function
4665 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4666 BACKOFF_SETUP(%o2)
4667 1: ldx [%o1], %g1
4668 - sub %g1, %o0, %g7
4669 + subcc %g1, %o0, %g7
4670 +
4671 +#ifdef CONFIG_PAX_REFCOUNT
4672 + tvs %xcc, 6
4673 +#endif
4674 +
4675 casx [%o1], %g1, %g7
4676 cmp %g1, %g7
4677 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4678 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4679 2: BACKOFF_SPIN(%o2, %o3, 1b)
4680 .size atomic64_sub, .-atomic64_sub
4681
4682 + .globl atomic64_sub_unchecked
4683 + .type atomic64_sub_unchecked,#function
4684 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4685 + BACKOFF_SETUP(%o2)
4686 +1: ldx [%o1], %g1
4687 + subcc %g1, %o0, %g7
4688 + casx [%o1], %g1, %g7
4689 + cmp %g1, %g7
4690 + bne,pn %xcc, 2f
4691 + nop
4692 + retl
4693 + nop
4694 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4695 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4696 +
4697 .globl atomic64_add_ret
4698 .type atomic64_add_ret,#function
4699 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4700 BACKOFF_SETUP(%o2)
4701 1: ldx [%o1], %g1
4702 - add %g1, %o0, %g7
4703 + addcc %g1, %o0, %g7
4704 +
4705 +#ifdef CONFIG_PAX_REFCOUNT
4706 + tvs %xcc, 6
4707 +#endif
4708 +
4709 casx [%o1], %g1, %g7
4710 cmp %g1, %g7
4711 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4712 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4713 2: BACKOFF_SPIN(%o2, %o3, 1b)
4714 .size atomic64_add_ret, .-atomic64_add_ret
4715
4716 + .globl atomic64_add_ret_unchecked
4717 + .type atomic64_add_ret_unchecked,#function
4718 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4719 + BACKOFF_SETUP(%o2)
4720 +1: ldx [%o1], %g1
4721 + addcc %g1, %o0, %g7
4722 + casx [%o1], %g1, %g7
4723 + cmp %g1, %g7
4724 + bne,pn %xcc, 2f
4725 + add %g7, %o0, %g7
4726 + mov %g7, %o0
4727 + retl
4728 + nop
4729 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4730 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4731 +
4732 .globl atomic64_sub_ret
4733 .type atomic64_sub_ret,#function
4734 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4735 BACKOFF_SETUP(%o2)
4736 1: ldx [%o1], %g1
4737 - sub %g1, %o0, %g7
4738 + subcc %g1, %o0, %g7
4739 +
4740 +#ifdef CONFIG_PAX_REFCOUNT
4741 + tvs %xcc, 6
4742 +#endif
4743 +
4744 casx [%o1], %g1, %g7
4745 cmp %g1, %g7
4746 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4747 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4748 index 1b30bb3..b4a16c7 100644
4749 --- a/arch/sparc/lib/ksyms.c
4750 +++ b/arch/sparc/lib/ksyms.c
4751 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4752
4753 /* Atomic counter implementation. */
4754 EXPORT_SYMBOL(atomic_add);
4755 +EXPORT_SYMBOL(atomic_add_unchecked);
4756 EXPORT_SYMBOL(atomic_add_ret);
4757 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4758 EXPORT_SYMBOL(atomic_sub);
4759 +EXPORT_SYMBOL(atomic_sub_unchecked);
4760 EXPORT_SYMBOL(atomic_sub_ret);
4761 EXPORT_SYMBOL(atomic64_add);
4762 +EXPORT_SYMBOL(atomic64_add_unchecked);
4763 EXPORT_SYMBOL(atomic64_add_ret);
4764 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4765 EXPORT_SYMBOL(atomic64_sub);
4766 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4767 EXPORT_SYMBOL(atomic64_sub_ret);
4768
4769 /* Atomic bit operations. */
4770 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4771 index 301421c..e2535d1 100644
4772 --- a/arch/sparc/mm/Makefile
4773 +++ b/arch/sparc/mm/Makefile
4774 @@ -2,7 +2,7 @@
4775 #
4776
4777 asflags-y := -ansi
4778 -ccflags-y := -Werror
4779 +#ccflags-y := -Werror
4780
4781 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4782 obj-y += fault_$(BITS).o
4783 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4784 index 8023fd7..c8e89e9 100644
4785 --- a/arch/sparc/mm/fault_32.c
4786 +++ b/arch/sparc/mm/fault_32.c
4787 @@ -21,6 +21,9 @@
4788 #include <linux/perf_event.h>
4789 #include <linux/interrupt.h>
4790 #include <linux/kdebug.h>
4791 +#include <linux/slab.h>
4792 +#include <linux/pagemap.h>
4793 +#include <linux/compiler.h>
4794
4795 #include <asm/system.h>
4796 #include <asm/page.h>
4797 @@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4798 return safe_compute_effective_address(regs, insn);
4799 }
4800
4801 +#ifdef CONFIG_PAX_PAGEEXEC
4802 +#ifdef CONFIG_PAX_DLRESOLVE
4803 +static void pax_emuplt_close(struct vm_area_struct *vma)
4804 +{
4805 + vma->vm_mm->call_dl_resolve = 0UL;
4806 +}
4807 +
4808 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4809 +{
4810 + unsigned int *kaddr;
4811 +
4812 + vmf->page = alloc_page(GFP_HIGHUSER);
4813 + if (!vmf->page)
4814 + return VM_FAULT_OOM;
4815 +
4816 + kaddr = kmap(vmf->page);
4817 + memset(kaddr, 0, PAGE_SIZE);
4818 + kaddr[0] = 0x9DE3BFA8U; /* save */
4819 + flush_dcache_page(vmf->page);
4820 + kunmap(vmf->page);
4821 + return VM_FAULT_MAJOR;
4822 +}
4823 +
4824 +static const struct vm_operations_struct pax_vm_ops = {
4825 + .close = pax_emuplt_close,
4826 + .fault = pax_emuplt_fault
4827 +};
4828 +
4829 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4830 +{
4831 + int ret;
4832 +
4833 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4834 + vma->vm_mm = current->mm;
4835 + vma->vm_start = addr;
4836 + vma->vm_end = addr + PAGE_SIZE;
4837 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4838 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4839 + vma->vm_ops = &pax_vm_ops;
4840 +
4841 + ret = insert_vm_struct(current->mm, vma);
4842 + if (ret)
4843 + return ret;
4844 +
4845 + ++current->mm->total_vm;
4846 + return 0;
4847 +}
4848 +#endif
4849 +
4850 +/*
4851 + * PaX: decide what to do with offenders (regs->pc = fault address)
4852 + *
4853 + * returns 1 when task should be killed
4854 + * 2 when patched PLT trampoline was detected
4855 + * 3 when unpatched PLT trampoline was detected
4856 + */
4857 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4858 +{
4859 +
4860 +#ifdef CONFIG_PAX_EMUPLT
4861 + int err;
4862 +
4863 + do { /* PaX: patched PLT emulation #1 */
4864 + unsigned int sethi1, sethi2, jmpl;
4865 +
4866 + err = get_user(sethi1, (unsigned int *)regs->pc);
4867 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4868 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4869 +
4870 + if (err)
4871 + break;
4872 +
4873 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4874 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4875 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4876 + {
4877 + unsigned int addr;
4878 +
4879 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4880 + addr = regs->u_regs[UREG_G1];
4881 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4882 + regs->pc = addr;
4883 + regs->npc = addr+4;
4884 + return 2;
4885 + }
4886 + } while (0);
4887 +
4888 + { /* PaX: patched PLT emulation #2 */
4889 + unsigned int ba;
4890 +
4891 + err = get_user(ba, (unsigned int *)regs->pc);
4892 +
4893 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4894 + unsigned int addr;
4895 +
4896 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4897 + regs->pc = addr;
4898 + regs->npc = addr+4;
4899 + return 2;
4900 + }
4901 + }
4902 +
4903 + do { /* PaX: patched PLT emulation #3 */
4904 + unsigned int sethi, jmpl, nop;
4905 +
4906 + err = get_user(sethi, (unsigned int *)regs->pc);
4907 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4908 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4909 +
4910 + if (err)
4911 + break;
4912 +
4913 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4914 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4915 + nop == 0x01000000U)
4916 + {
4917 + unsigned int addr;
4918 +
4919 + addr = (sethi & 0x003FFFFFU) << 10;
4920 + regs->u_regs[UREG_G1] = addr;
4921 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4922 + regs->pc = addr;
4923 + regs->npc = addr+4;
4924 + return 2;
4925 + }
4926 + } while (0);
4927 +
4928 + do { /* PaX: unpatched PLT emulation step 1 */
4929 + unsigned int sethi, ba, nop;
4930 +
4931 + err = get_user(sethi, (unsigned int *)regs->pc);
4932 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4933 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4934 +
4935 + if (err)
4936 + break;
4937 +
4938 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4939 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4940 + nop == 0x01000000U)
4941 + {
4942 + unsigned int addr, save, call;
4943 +
4944 + if ((ba & 0xFFC00000U) == 0x30800000U)
4945 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4946 + else
4947 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4948 +
4949 + err = get_user(save, (unsigned int *)addr);
4950 + err |= get_user(call, (unsigned int *)(addr+4));
4951 + err |= get_user(nop, (unsigned int *)(addr+8));
4952 + if (err)
4953 + break;
4954 +
4955 +#ifdef CONFIG_PAX_DLRESOLVE
4956 + if (save == 0x9DE3BFA8U &&
4957 + (call & 0xC0000000U) == 0x40000000U &&
4958 + nop == 0x01000000U)
4959 + {
4960 + struct vm_area_struct *vma;
4961 + unsigned long call_dl_resolve;
4962 +
4963 + down_read(&current->mm->mmap_sem);
4964 + call_dl_resolve = current->mm->call_dl_resolve;
4965 + up_read(&current->mm->mmap_sem);
4966 + if (likely(call_dl_resolve))
4967 + goto emulate;
4968 +
4969 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4970 +
4971 + down_write(&current->mm->mmap_sem);
4972 + if (current->mm->call_dl_resolve) {
4973 + call_dl_resolve = current->mm->call_dl_resolve;
4974 + up_write(&current->mm->mmap_sem);
4975 + if (vma)
4976 + kmem_cache_free(vm_area_cachep, vma);
4977 + goto emulate;
4978 + }
4979 +
4980 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4981 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4982 + up_write(&current->mm->mmap_sem);
4983 + if (vma)
4984 + kmem_cache_free(vm_area_cachep, vma);
4985 + return 1;
4986 + }
4987 +
4988 + if (pax_insert_vma(vma, call_dl_resolve)) {
4989 + up_write(&current->mm->mmap_sem);
4990 + kmem_cache_free(vm_area_cachep, vma);
4991 + return 1;
4992 + }
4993 +
4994 + current->mm->call_dl_resolve = call_dl_resolve;
4995 + up_write(&current->mm->mmap_sem);
4996 +
4997 +emulate:
4998 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4999 + regs->pc = call_dl_resolve;
5000 + regs->npc = addr+4;
5001 + return 3;
5002 + }
5003 +#endif
5004 +
5005 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5006 + if ((save & 0xFFC00000U) == 0x05000000U &&
5007 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5008 + nop == 0x01000000U)
5009 + {
5010 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5011 + regs->u_regs[UREG_G2] = addr + 4;
5012 + addr = (save & 0x003FFFFFU) << 10;
5013 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5014 + regs->pc = addr;
5015 + regs->npc = addr+4;
5016 + return 3;
5017 + }
5018 + }
5019 + } while (0);
5020 +
5021 + do { /* PaX: unpatched PLT emulation step 2 */
5022 + unsigned int save, call, nop;
5023 +
5024 + err = get_user(save, (unsigned int *)(regs->pc-4));
5025 + err |= get_user(call, (unsigned int *)regs->pc);
5026 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5027 + if (err)
5028 + break;
5029 +
5030 + if (save == 0x9DE3BFA8U &&
5031 + (call & 0xC0000000U) == 0x40000000U &&
5032 + nop == 0x01000000U)
5033 + {
5034 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5035 +
5036 + regs->u_regs[UREG_RETPC] = regs->pc;
5037 + regs->pc = dl_resolve;
5038 + regs->npc = dl_resolve+4;
5039 + return 3;
5040 + }
5041 + } while (0);
5042 +#endif
5043 +
5044 + return 1;
5045 +}
5046 +
5047 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5048 +{
5049 + unsigned long i;
5050 +
5051 + printk(KERN_ERR "PAX: bytes at PC: ");
5052 + for (i = 0; i < 8; i++) {
5053 + unsigned int c;
5054 + if (get_user(c, (unsigned int *)pc+i))
5055 + printk(KERN_CONT "???????? ");
5056 + else
5057 + printk(KERN_CONT "%08x ", c);
5058 + }
5059 + printk("\n");
5060 +}
5061 +#endif
5062 +
5063 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
5064 int text_fault)
5065 {
5066 @@ -280,6 +545,24 @@ good_area:
5067 if(!(vma->vm_flags & VM_WRITE))
5068 goto bad_area;
5069 } else {
5070 +
5071 +#ifdef CONFIG_PAX_PAGEEXEC
5072 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5073 + up_read(&mm->mmap_sem);
5074 + switch (pax_handle_fetch_fault(regs)) {
5075 +
5076 +#ifdef CONFIG_PAX_EMUPLT
5077 + case 2:
5078 + case 3:
5079 + return;
5080 +#endif
5081 +
5082 + }
5083 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5084 + do_group_exit(SIGKILL);
5085 + }
5086 +#endif
5087 +
5088 /* Allow reads even for write-only mappings */
5089 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5090 goto bad_area;
5091 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
5092 index 504c062..6fcb9c6 100644
5093 --- a/arch/sparc/mm/fault_64.c
5094 +++ b/arch/sparc/mm/fault_64.c
5095 @@ -21,6 +21,9 @@
5096 #include <linux/kprobes.h>
5097 #include <linux/kdebug.h>
5098 #include <linux/percpu.h>
5099 +#include <linux/slab.h>
5100 +#include <linux/pagemap.h>
5101 +#include <linux/compiler.h>
5102
5103 #include <asm/page.h>
5104 #include <asm/pgtable.h>
5105 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
5106 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5107 regs->tpc);
5108 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5109 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5110 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5111 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5112 dump_stack();
5113 unhandled_fault(regs->tpc, current, regs);
5114 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
5115 show_regs(regs);
5116 }
5117
5118 +#ifdef CONFIG_PAX_PAGEEXEC
5119 +#ifdef CONFIG_PAX_DLRESOLVE
5120 +static void pax_emuplt_close(struct vm_area_struct *vma)
5121 +{
5122 + vma->vm_mm->call_dl_resolve = 0UL;
5123 +}
5124 +
5125 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5126 +{
5127 + unsigned int *kaddr;
5128 +
5129 + vmf->page = alloc_page(GFP_HIGHUSER);
5130 + if (!vmf->page)
5131 + return VM_FAULT_OOM;
5132 +
5133 + kaddr = kmap(vmf->page);
5134 + memset(kaddr, 0, PAGE_SIZE);
5135 + kaddr[0] = 0x9DE3BFA8U; /* save */
5136 + flush_dcache_page(vmf->page);
5137 + kunmap(vmf->page);
5138 + return VM_FAULT_MAJOR;
5139 +}
5140 +
5141 +static const struct vm_operations_struct pax_vm_ops = {
5142 + .close = pax_emuplt_close,
5143 + .fault = pax_emuplt_fault
5144 +};
5145 +
5146 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5147 +{
5148 + int ret;
5149 +
5150 + INIT_LIST_HEAD(&vma->anon_vma_chain);
5151 + vma->vm_mm = current->mm;
5152 + vma->vm_start = addr;
5153 + vma->vm_end = addr + PAGE_SIZE;
5154 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5155 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5156 + vma->vm_ops = &pax_vm_ops;
5157 +
5158 + ret = insert_vm_struct(current->mm, vma);
5159 + if (ret)
5160 + return ret;
5161 +
5162 + ++current->mm->total_vm;
5163 + return 0;
5164 +}
5165 +#endif
5166 +
5167 +/*
5168 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5169 + *
5170 + * returns 1 when task should be killed
5171 + * 2 when patched PLT trampoline was detected
5172 + * 3 when unpatched PLT trampoline was detected
5173 + */
5174 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5175 +{
5176 +
5177 +#ifdef CONFIG_PAX_EMUPLT
5178 + int err;
5179 +
5180 + do { /* PaX: patched PLT emulation #1 */
5181 + unsigned int sethi1, sethi2, jmpl;
5182 +
5183 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5184 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5185 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5186 +
5187 + if (err)
5188 + break;
5189 +
5190 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5191 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5192 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5193 + {
5194 + unsigned long addr;
5195 +
5196 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5197 + addr = regs->u_regs[UREG_G1];
5198 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5199 +
5200 + if (test_thread_flag(TIF_32BIT))
5201 + addr &= 0xFFFFFFFFUL;
5202 +
5203 + regs->tpc = addr;
5204 + regs->tnpc = addr+4;
5205 + return 2;
5206 + }
5207 + } while (0);
5208 +
5209 + { /* PaX: patched PLT emulation #2 */
5210 + unsigned int ba;
5211 +
5212 + err = get_user(ba, (unsigned int *)regs->tpc);
5213 +
5214 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5215 + unsigned long addr;
5216 +
5217 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5218 +
5219 + if (test_thread_flag(TIF_32BIT))
5220 + addr &= 0xFFFFFFFFUL;
5221 +
5222 + regs->tpc = addr;
5223 + regs->tnpc = addr+4;
5224 + return 2;
5225 + }
5226 + }
5227 +
5228 + do { /* PaX: patched PLT emulation #3 */
5229 + unsigned int sethi, jmpl, nop;
5230 +
5231 + err = get_user(sethi, (unsigned int *)regs->tpc);
5232 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5233 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5234 +
5235 + if (err)
5236 + break;
5237 +
5238 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5239 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5240 + nop == 0x01000000U)
5241 + {
5242 + unsigned long addr;
5243 +
5244 + addr = (sethi & 0x003FFFFFU) << 10;
5245 + regs->u_regs[UREG_G1] = addr;
5246 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5247 +
5248 + if (test_thread_flag(TIF_32BIT))
5249 + addr &= 0xFFFFFFFFUL;
5250 +
5251 + regs->tpc = addr;
5252 + regs->tnpc = addr+4;
5253 + return 2;
5254 + }
5255 + } while (0);
5256 +
5257 + do { /* PaX: patched PLT emulation #4 */
5258 + unsigned int sethi, mov1, call, mov2;
5259 +
5260 + err = get_user(sethi, (unsigned int *)regs->tpc);
5261 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5262 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5263 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5264 +
5265 + if (err)
5266 + break;
5267 +
5268 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5269 + mov1 == 0x8210000FU &&
5270 + (call & 0xC0000000U) == 0x40000000U &&
5271 + mov2 == 0x9E100001U)
5272 + {
5273 + unsigned long addr;
5274 +
5275 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5276 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5277 +
5278 + if (test_thread_flag(TIF_32BIT))
5279 + addr &= 0xFFFFFFFFUL;
5280 +
5281 + regs->tpc = addr;
5282 + regs->tnpc = addr+4;
5283 + return 2;
5284 + }
5285 + } while (0);
5286 +
5287 + do { /* PaX: patched PLT emulation #5 */
5288 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5289 +
5290 + err = get_user(sethi, (unsigned int *)regs->tpc);
5291 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5292 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5293 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5294 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5295 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5296 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5297 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5298 +
5299 + if (err)
5300 + break;
5301 +
5302 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5303 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5304 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5305 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5306 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5307 + sllx == 0x83287020U &&
5308 + jmpl == 0x81C04005U &&
5309 + nop == 0x01000000U)
5310 + {
5311 + unsigned long addr;
5312 +
5313 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5314 + regs->u_regs[UREG_G1] <<= 32;
5315 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5316 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5317 + regs->tpc = addr;
5318 + regs->tnpc = addr+4;
5319 + return 2;
5320 + }
5321 + } while (0);
5322 +
5323 + do { /* PaX: patched PLT emulation #6 */
5324 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5325 +
5326 + err = get_user(sethi, (unsigned int *)regs->tpc);
5327 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5328 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5329 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5330 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5331 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5332 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5333 +
5334 + if (err)
5335 + break;
5336 +
5337 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5338 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5339 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5340 + sllx == 0x83287020U &&
5341 + (or & 0xFFFFE000U) == 0x8A116000U &&
5342 + jmpl == 0x81C04005U &&
5343 + nop == 0x01000000U)
5344 + {
5345 + unsigned long addr;
5346 +
5347 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5348 + regs->u_regs[UREG_G1] <<= 32;
5349 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5350 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5351 + regs->tpc = addr;
5352 + regs->tnpc = addr+4;
5353 + return 2;
5354 + }
5355 + } while (0);
5356 +
5357 + do { /* PaX: unpatched PLT emulation step 1 */
5358 + unsigned int sethi, ba, nop;
5359 +
5360 + err = get_user(sethi, (unsigned int *)regs->tpc);
5361 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5362 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5363 +
5364 + if (err)
5365 + break;
5366 +
5367 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5368 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5369 + nop == 0x01000000U)
5370 + {
5371 + unsigned long addr;
5372 + unsigned int save, call;
5373 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5374 +
5375 + if ((ba & 0xFFC00000U) == 0x30800000U)
5376 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5377 + else
5378 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5379 +
5380 + if (test_thread_flag(TIF_32BIT))
5381 + addr &= 0xFFFFFFFFUL;
5382 +
5383 + err = get_user(save, (unsigned int *)addr);
5384 + err |= get_user(call, (unsigned int *)(addr+4));
5385 + err |= get_user(nop, (unsigned int *)(addr+8));
5386 + if (err)
5387 + break;
5388 +
5389 +#ifdef CONFIG_PAX_DLRESOLVE
5390 + if (save == 0x9DE3BFA8U &&
5391 + (call & 0xC0000000U) == 0x40000000U &&
5392 + nop == 0x01000000U)
5393 + {
5394 + struct vm_area_struct *vma;
5395 + unsigned long call_dl_resolve;
5396 +
5397 + down_read(&current->mm->mmap_sem);
5398 + call_dl_resolve = current->mm->call_dl_resolve;
5399 + up_read(&current->mm->mmap_sem);
5400 + if (likely(call_dl_resolve))
5401 + goto emulate;
5402 +
5403 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5404 +
5405 + down_write(&current->mm->mmap_sem);
5406 + if (current->mm->call_dl_resolve) {
5407 + call_dl_resolve = current->mm->call_dl_resolve;
5408 + up_write(&current->mm->mmap_sem);
5409 + if (vma)
5410 + kmem_cache_free(vm_area_cachep, vma);
5411 + goto emulate;
5412 + }
5413 +
5414 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5415 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5416 + up_write(&current->mm->mmap_sem);
5417 + if (vma)
5418 + kmem_cache_free(vm_area_cachep, vma);
5419 + return 1;
5420 + }
5421 +
5422 + if (pax_insert_vma(vma, call_dl_resolve)) {
5423 + up_write(&current->mm->mmap_sem);
5424 + kmem_cache_free(vm_area_cachep, vma);
5425 + return 1;
5426 + }
5427 +
5428 + current->mm->call_dl_resolve = call_dl_resolve;
5429 + up_write(&current->mm->mmap_sem);
5430 +
5431 +emulate:
5432 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5433 + regs->tpc = call_dl_resolve;
5434 + regs->tnpc = addr+4;
5435 + return 3;
5436 + }
5437 +#endif
5438 +
5439 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5440 + if ((save & 0xFFC00000U) == 0x05000000U &&
5441 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5442 + nop == 0x01000000U)
5443 + {
5444 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5445 + regs->u_regs[UREG_G2] = addr + 4;
5446 + addr = (save & 0x003FFFFFU) << 10;
5447 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5448 +
5449 + if (test_thread_flag(TIF_32BIT))
5450 + addr &= 0xFFFFFFFFUL;
5451 +
5452 + regs->tpc = addr;
5453 + regs->tnpc = addr+4;
5454 + return 3;
5455 + }
5456 +
5457 + /* PaX: 64-bit PLT stub */
5458 + err = get_user(sethi1, (unsigned int *)addr);
5459 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5460 + err |= get_user(or1, (unsigned int *)(addr+8));
5461 + err |= get_user(or2, (unsigned int *)(addr+12));
5462 + err |= get_user(sllx, (unsigned int *)(addr+16));
5463 + err |= get_user(add, (unsigned int *)(addr+20));
5464 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5465 + err |= get_user(nop, (unsigned int *)(addr+28));
5466 + if (err)
5467 + break;
5468 +
5469 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5470 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5471 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5472 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5473 + sllx == 0x89293020U &&
5474 + add == 0x8A010005U &&
5475 + jmpl == 0x89C14000U &&
5476 + nop == 0x01000000U)
5477 + {
5478 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5479 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5480 + regs->u_regs[UREG_G4] <<= 32;
5481 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5482 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5483 + regs->u_regs[UREG_G4] = addr + 24;
5484 + addr = regs->u_regs[UREG_G5];
5485 + regs->tpc = addr;
5486 + regs->tnpc = addr+4;
5487 + return 3;
5488 + }
5489 + }
5490 + } while (0);
5491 +
5492 +#ifdef CONFIG_PAX_DLRESOLVE
5493 + do { /* PaX: unpatched PLT emulation step 2 */
5494 + unsigned int save, call, nop;
5495 +
5496 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5497 + err |= get_user(call, (unsigned int *)regs->tpc);
5498 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5499 + if (err)
5500 + break;
5501 +
5502 + if (save == 0x9DE3BFA8U &&
5503 + (call & 0xC0000000U) == 0x40000000U &&
5504 + nop == 0x01000000U)
5505 + {
5506 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5507 +
5508 + if (test_thread_flag(TIF_32BIT))
5509 + dl_resolve &= 0xFFFFFFFFUL;
5510 +
5511 + regs->u_regs[UREG_RETPC] = regs->tpc;
5512 + regs->tpc = dl_resolve;
5513 + regs->tnpc = dl_resolve+4;
5514 + return 3;
5515 + }
5516 + } while (0);
5517 +#endif
5518 +
5519 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5520 + unsigned int sethi, ba, nop;
5521 +
5522 + err = get_user(sethi, (unsigned int *)regs->tpc);
5523 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5524 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5525 +
5526 + if (err)
5527 + break;
5528 +
5529 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5530 + (ba & 0xFFF00000U) == 0x30600000U &&
5531 + nop == 0x01000000U)
5532 + {
5533 + unsigned long addr;
5534 +
5535 + addr = (sethi & 0x003FFFFFU) << 10;
5536 + regs->u_regs[UREG_G1] = addr;
5537 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5538 +
5539 + if (test_thread_flag(TIF_32BIT))
5540 + addr &= 0xFFFFFFFFUL;
5541 +
5542 + regs->tpc = addr;
5543 + regs->tnpc = addr+4;
5544 + return 2;
5545 + }
5546 + } while (0);
5547 +
5548 +#endif
5549 +
5550 + return 1;
5551 +}
5552 +
5553 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5554 +{
5555 + unsigned long i;
5556 +
5557 + printk(KERN_ERR "PAX: bytes at PC: ");
5558 + for (i = 0; i < 8; i++) {
5559 + unsigned int c;
5560 + if (get_user(c, (unsigned int *)pc+i))
5561 + printk(KERN_CONT "???????? ");
5562 + else
5563 + printk(KERN_CONT "%08x ", c);
5564 + }
5565 + printk("\n");
5566 +}
5567 +#endif
5568 +
5569 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5570 {
5571 struct mm_struct *mm = current->mm;
5572 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5573 if (!vma)
5574 goto bad_area;
5575
5576 +#ifdef CONFIG_PAX_PAGEEXEC
5577 + /* PaX: detect ITLB misses on non-exec pages */
5578 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5579 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5580 + {
5581 + if (address != regs->tpc)
5582 + goto good_area;
5583 +
5584 + up_read(&mm->mmap_sem);
5585 + switch (pax_handle_fetch_fault(regs)) {
5586 +
5587 +#ifdef CONFIG_PAX_EMUPLT
5588 + case 2:
5589 + case 3:
5590 + return;
5591 +#endif
5592 +
5593 + }
5594 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5595 + do_group_exit(SIGKILL);
5596 + }
5597 +#endif
5598 +
5599 /* Pure DTLB misses do not tell us whether the fault causing
5600 * load/store/atomic was a write or not, it only says that there
5601 * was no match. So in such a case we (carefully) read the
5602 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5603 index 07e1453..0a7d9e9 100644
5604 --- a/arch/sparc/mm/hugetlbpage.c
5605 +++ b/arch/sparc/mm/hugetlbpage.c
5606 @@ -67,7 +67,7 @@ full_search:
5607 }
5608 return -ENOMEM;
5609 }
5610 - if (likely(!vma || addr + len <= vma->vm_start)) {
5611 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5612 /*
5613 * Remember the place where we stopped the search:
5614 */
5615 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5616 /* make sure it can fit in the remaining address space */
5617 if (likely(addr > len)) {
5618 vma = find_vma(mm, addr-len);
5619 - if (!vma || addr <= vma->vm_start) {
5620 + if (check_heap_stack_gap(vma, addr - len, len)) {
5621 /* remember the address as a hint for next time */
5622 return (mm->free_area_cache = addr-len);
5623 }
5624 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5625 if (unlikely(mm->mmap_base < len))
5626 goto bottomup;
5627
5628 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5629 + addr = mm->mmap_base - len;
5630
5631 do {
5632 + addr &= HPAGE_MASK;
5633 /*
5634 * Lookup failure means no vma is above this address,
5635 * else if new region fits below vma->vm_start,
5636 * return with success:
5637 */
5638 vma = find_vma(mm, addr);
5639 - if (likely(!vma || addr+len <= vma->vm_start)) {
5640 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5641 /* remember the address as a hint for next time */
5642 return (mm->free_area_cache = addr);
5643 }
5644 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5645 mm->cached_hole_size = vma->vm_start - addr;
5646
5647 /* try just below the current vma->vm_start */
5648 - addr = (vma->vm_start-len) & HPAGE_MASK;
5649 - } while (likely(len < vma->vm_start));
5650 + addr = skip_heap_stack_gap(vma, len);
5651 + } while (!IS_ERR_VALUE(addr));
5652
5653 bottomup:
5654 /*
5655 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5656 if (addr) {
5657 addr = ALIGN(addr, HPAGE_SIZE);
5658 vma = find_vma(mm, addr);
5659 - if (task_size - len >= addr &&
5660 - (!vma || addr + len <= vma->vm_start))
5661 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5662 return addr;
5663 }
5664 if (mm->get_unmapped_area == arch_get_unmapped_area)
5665 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5666 index 7b00de6..78239f4 100644
5667 --- a/arch/sparc/mm/init_32.c
5668 +++ b/arch/sparc/mm/init_32.c
5669 @@ -316,6 +316,9 @@ extern void device_scan(void);
5670 pgprot_t PAGE_SHARED __read_mostly;
5671 EXPORT_SYMBOL(PAGE_SHARED);
5672
5673 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5674 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5675 +
5676 void __init paging_init(void)
5677 {
5678 switch(sparc_cpu_model) {
5679 @@ -344,17 +347,17 @@ void __init paging_init(void)
5680
5681 /* Initialize the protection map with non-constant, MMU dependent values. */
5682 protection_map[0] = PAGE_NONE;
5683 - protection_map[1] = PAGE_READONLY;
5684 - protection_map[2] = PAGE_COPY;
5685 - protection_map[3] = PAGE_COPY;
5686 + protection_map[1] = PAGE_READONLY_NOEXEC;
5687 + protection_map[2] = PAGE_COPY_NOEXEC;
5688 + protection_map[3] = PAGE_COPY_NOEXEC;
5689 protection_map[4] = PAGE_READONLY;
5690 protection_map[5] = PAGE_READONLY;
5691 protection_map[6] = PAGE_COPY;
5692 protection_map[7] = PAGE_COPY;
5693 protection_map[8] = PAGE_NONE;
5694 - protection_map[9] = PAGE_READONLY;
5695 - protection_map[10] = PAGE_SHARED;
5696 - protection_map[11] = PAGE_SHARED;
5697 + protection_map[9] = PAGE_READONLY_NOEXEC;
5698 + protection_map[10] = PAGE_SHARED_NOEXEC;
5699 + protection_map[11] = PAGE_SHARED_NOEXEC;
5700 protection_map[12] = PAGE_READONLY;
5701 protection_map[13] = PAGE_READONLY;
5702 protection_map[14] = PAGE_SHARED;
5703 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5704 index cbef74e..c38fead 100644
5705 --- a/arch/sparc/mm/srmmu.c
5706 +++ b/arch/sparc/mm/srmmu.c
5707 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5708 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5709 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5710 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5711 +
5712 +#ifdef CONFIG_PAX_PAGEEXEC
5713 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5714 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5715 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5716 +#endif
5717 +
5718 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5719 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5720
5721 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
5722 index 27fe667..36d474c 100644
5723 --- a/arch/tile/include/asm/atomic_64.h
5724 +++ b/arch/tile/include/asm/atomic_64.h
5725 @@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5726
5727 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5728
5729 +#define atomic64_read_unchecked(v) atomic64_read(v)
5730 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5731 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5732 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5733 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5734 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
5735 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5736 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
5737 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5738 +
5739 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
5740 #define smp_mb__before_atomic_dec() smp_mb()
5741 #define smp_mb__after_atomic_dec() smp_mb()
5742 diff --git a/arch/um/Makefile b/arch/um/Makefile
5743 index 7730af6..cce5b19 100644
5744 --- a/arch/um/Makefile
5745 +++ b/arch/um/Makefile
5746 @@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5747 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5748 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
5749
5750 +ifdef CONSTIFY_PLUGIN
5751 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5752 +endif
5753 +
5754 #This will adjust *FLAGS accordingly to the platform.
5755 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
5756
5757 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5758 index 6c03acd..a5e0215 100644
5759 --- a/arch/um/include/asm/kmap_types.h
5760 +++ b/arch/um/include/asm/kmap_types.h
5761 @@ -23,6 +23,7 @@ enum km_type {
5762 KM_IRQ1,
5763 KM_SOFTIRQ0,
5764 KM_SOFTIRQ1,
5765 + KM_CLEARPAGE,
5766 KM_TYPE_NR
5767 };
5768
5769 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5770 index 7cfc3ce..cbd1a58 100644
5771 --- a/arch/um/include/asm/page.h
5772 +++ b/arch/um/include/asm/page.h
5773 @@ -14,6 +14,9 @@
5774 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5775 #define PAGE_MASK (~(PAGE_SIZE-1))
5776
5777 +#define ktla_ktva(addr) (addr)
5778 +#define ktva_ktla(addr) (addr)
5779 +
5780 #ifndef __ASSEMBLY__
5781
5782 struct page;
5783 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5784 index c533835..84db18e 100644
5785 --- a/arch/um/kernel/process.c
5786 +++ b/arch/um/kernel/process.c
5787 @@ -406,22 +406,6 @@ int singlestepping(void * t)
5788 return 2;
5789 }
5790
5791 -/*
5792 - * Only x86 and x86_64 have an arch_align_stack().
5793 - * All other arches have "#define arch_align_stack(x) (x)"
5794 - * in their asm/system.h
5795 - * As this is included in UML from asm-um/system-generic.h,
5796 - * we can use it to behave as the subarch does.
5797 - */
5798 -#ifndef arch_align_stack
5799 -unsigned long arch_align_stack(unsigned long sp)
5800 -{
5801 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5802 - sp -= get_random_int() % 8192;
5803 - return sp & ~0xf;
5804 -}
5805 -#endif
5806 -
5807 unsigned long get_wchan(struct task_struct *p)
5808 {
5809 unsigned long stack_page, sp, ip;
5810 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5811 index efb4294..61bc18c 100644
5812 --- a/arch/x86/Kconfig
5813 +++ b/arch/x86/Kconfig
5814 @@ -235,7 +235,7 @@ config X86_HT
5815
5816 config X86_32_LAZY_GS
5817 def_bool y
5818 - depends on X86_32 && !CC_STACKPROTECTOR
5819 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5820
5821 config ARCH_HWEIGHT_CFLAGS
5822 string
5823 @@ -1022,7 +1022,7 @@ choice
5824
5825 config NOHIGHMEM
5826 bool "off"
5827 - depends on !X86_NUMAQ
5828 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5829 ---help---
5830 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5831 However, the address space of 32-bit x86 processors is only 4
5832 @@ -1059,7 +1059,7 @@ config NOHIGHMEM
5833
5834 config HIGHMEM4G
5835 bool "4GB"
5836 - depends on !X86_NUMAQ
5837 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5838 ---help---
5839 Select this if you have a 32-bit processor and between 1 and 4
5840 gigabytes of physical RAM.
5841 @@ -1113,7 +1113,7 @@ config PAGE_OFFSET
5842 hex
5843 default 0xB0000000 if VMSPLIT_3G_OPT
5844 default 0x80000000 if VMSPLIT_2G
5845 - default 0x78000000 if VMSPLIT_2G_OPT
5846 + default 0x70000000 if VMSPLIT_2G_OPT
5847 default 0x40000000 if VMSPLIT_1G
5848 default 0xC0000000
5849 depends on X86_32
5850 @@ -1496,6 +1496,7 @@ config SECCOMP
5851
5852 config CC_STACKPROTECTOR
5853 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5854 + depends on X86_64 || !PAX_MEMORY_UDEREF
5855 ---help---
5856 This option turns on the -fstack-protector GCC feature. This
5857 feature puts, at the beginning of functions, a canary value on
5858 @@ -1553,6 +1554,7 @@ config KEXEC_JUMP
5859 config PHYSICAL_START
5860 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5861 default "0x1000000"
5862 + range 0x400000 0x40000000
5863 ---help---
5864 This gives the physical address where the kernel is loaded.
5865
5866 @@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
5867 config PHYSICAL_ALIGN
5868 hex "Alignment value to which kernel should be aligned" if X86_32
5869 default "0x1000000"
5870 + range 0x400000 0x1000000 if PAX_KERNEXEC
5871 range 0x2000 0x1000000
5872 ---help---
5873 This value puts the alignment restrictions on physical address
5874 @@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
5875 Say N if you want to disable CPU hotplug.
5876
5877 config COMPAT_VDSO
5878 - def_bool y
5879 + def_bool n
5880 prompt "Compat VDSO support"
5881 depends on X86_32 || IA32_EMULATION
5882 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5883 ---help---
5884 Map the 32-bit VDSO to the predictable old-style address too.
5885
5886 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5887 index e3ca7e0..b30b28a 100644
5888 --- a/arch/x86/Kconfig.cpu
5889 +++ b/arch/x86/Kconfig.cpu
5890 @@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5891
5892 config X86_F00F_BUG
5893 def_bool y
5894 - depends on M586MMX || M586TSC || M586 || M486 || M386
5895 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5896
5897 config X86_INVD_BUG
5898 def_bool y
5899 @@ -365,7 +365,7 @@ config X86_POPAD_OK
5900
5901 config X86_ALIGNMENT_16
5902 def_bool y
5903 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5904 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5905
5906 config X86_INTEL_USERCOPY
5907 def_bool y
5908 @@ -411,7 +411,7 @@ config X86_CMPXCHG64
5909 # generates cmov.
5910 config X86_CMOV
5911 def_bool y
5912 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5913 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5914
5915 config X86_MINIMUM_CPU_FAMILY
5916 int
5917 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5918 index bf56e17..05f9891 100644
5919 --- a/arch/x86/Kconfig.debug
5920 +++ b/arch/x86/Kconfig.debug
5921 @@ -81,7 +81,7 @@ config X86_PTDUMP
5922 config DEBUG_RODATA
5923 bool "Write protect kernel read-only data structures"
5924 default y
5925 - depends on DEBUG_KERNEL
5926 + depends on DEBUG_KERNEL && BROKEN
5927 ---help---
5928 Mark the kernel read-only data as write-protected in the pagetables,
5929 in order to catch accidental (and incorrect) writes to such const
5930 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5931
5932 config DEBUG_SET_MODULE_RONX
5933 bool "Set loadable kernel module data as NX and text as RO"
5934 - depends on MODULES
5935 + depends on MODULES && BROKEN
5936 ---help---
5937 This option helps catch unintended modifications to loadable
5938 kernel module's text and read-only data. It also prevents execution
5939 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5940 index b02e509..2631e48 100644
5941 --- a/arch/x86/Makefile
5942 +++ b/arch/x86/Makefile
5943 @@ -46,6 +46,7 @@ else
5944 UTS_MACHINE := x86_64
5945 CHECKFLAGS += -D__x86_64__ -m64
5946
5947 + biarch := $(call cc-option,-m64)
5948 KBUILD_AFLAGS += -m64
5949 KBUILD_CFLAGS += -m64
5950
5951 @@ -195,3 +196,12 @@ define archhelp
5952 echo ' FDARGS="..." arguments for the booted kernel'
5953 echo ' FDINITRD=file initrd for the booted kernel'
5954 endef
5955 +
5956 +define OLD_LD
5957 +
5958 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5959 +*** Please upgrade your binutils to 2.18 or newer
5960 +endef
5961 +
5962 +archprepare:
5963 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5964 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5965 index 95365a8..52f857b 100644
5966 --- a/arch/x86/boot/Makefile
5967 +++ b/arch/x86/boot/Makefile
5968 @@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5969 $(call cc-option, -fno-stack-protector) \
5970 $(call cc-option, -mpreferred-stack-boundary=2)
5971 KBUILD_CFLAGS += $(call cc-option, -m32)
5972 +ifdef CONSTIFY_PLUGIN
5973 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5974 +endif
5975 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5976 GCOV_PROFILE := n
5977
5978 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5979 index 878e4b9..20537ab 100644
5980 --- a/arch/x86/boot/bitops.h
5981 +++ b/arch/x86/boot/bitops.h
5982 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5983 u8 v;
5984 const u32 *p = (const u32 *)addr;
5985
5986 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5987 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5988 return v;
5989 }
5990
5991 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5992
5993 static inline void set_bit(int nr, void *addr)
5994 {
5995 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5996 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5997 }
5998
5999 #endif /* BOOT_BITOPS_H */
6000 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
6001 index c7093bd..d4247ffe0 100644
6002 --- a/arch/x86/boot/boot.h
6003 +++ b/arch/x86/boot/boot.h
6004 @@ -85,7 +85,7 @@ static inline void io_delay(void)
6005 static inline u16 ds(void)
6006 {
6007 u16 seg;
6008 - asm("movw %%ds,%0" : "=rm" (seg));
6009 + asm volatile("movw %%ds,%0" : "=rm" (seg));
6010 return seg;
6011 }
6012
6013 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
6014 static inline int memcmp(const void *s1, const void *s2, size_t len)
6015 {
6016 u8 diff;
6017 - asm("repe; cmpsb; setnz %0"
6018 + asm volatile("repe; cmpsb; setnz %0"
6019 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6020 return diff;
6021 }
6022 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
6023 index 09664ef..edc5d03 100644
6024 --- a/arch/x86/boot/compressed/Makefile
6025 +++ b/arch/x86/boot/compressed/Makefile
6026 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
6027 KBUILD_CFLAGS += $(cflags-y)
6028 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6029 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6030 +ifdef CONSTIFY_PLUGIN
6031 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6032 +endif
6033
6034 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6035 GCOV_PROFILE := n
6036 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
6037 index 67a655a..b924059 100644
6038 --- a/arch/x86/boot/compressed/head_32.S
6039 +++ b/arch/x86/boot/compressed/head_32.S
6040 @@ -76,7 +76,7 @@ ENTRY(startup_32)
6041 notl %eax
6042 andl %eax, %ebx
6043 #else
6044 - movl $LOAD_PHYSICAL_ADDR, %ebx
6045 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6046 #endif
6047
6048 /* Target address to relocate to for decompression */
6049 @@ -162,7 +162,7 @@ relocated:
6050 * and where it was actually loaded.
6051 */
6052 movl %ebp, %ebx
6053 - subl $LOAD_PHYSICAL_ADDR, %ebx
6054 + subl $____LOAD_PHYSICAL_ADDR, %ebx
6055 jz 2f /* Nothing to be done if loaded at compiled addr. */
6056 /*
6057 * Process relocations.
6058 @@ -170,8 +170,7 @@ relocated:
6059
6060 1: subl $4, %edi
6061 movl (%edi), %ecx
6062 - testl %ecx, %ecx
6063 - jz 2f
6064 + jecxz 2f
6065 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6066 jmp 1b
6067 2:
6068 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
6069 index 35af09d..99c9676 100644
6070 --- a/arch/x86/boot/compressed/head_64.S
6071 +++ b/arch/x86/boot/compressed/head_64.S
6072 @@ -91,7 +91,7 @@ ENTRY(startup_32)
6073 notl %eax
6074 andl %eax, %ebx
6075 #else
6076 - movl $LOAD_PHYSICAL_ADDR, %ebx
6077 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6078 #endif
6079
6080 /* Target address to relocate to for decompression */
6081 @@ -233,7 +233,7 @@ ENTRY(startup_64)
6082 notq %rax
6083 andq %rax, %rbp
6084 #else
6085 - movq $LOAD_PHYSICAL_ADDR, %rbp
6086 + movq $____LOAD_PHYSICAL_ADDR, %rbp
6087 #endif
6088
6089 /* Target address to relocate to for decompression */
6090 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
6091 index 3a19d04..7c1d55a 100644
6092 --- a/arch/x86/boot/compressed/misc.c
6093 +++ b/arch/x86/boot/compressed/misc.c
6094 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
6095 case PT_LOAD:
6096 #ifdef CONFIG_RELOCATABLE
6097 dest = output;
6098 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6099 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6100 #else
6101 dest = (void *)(phdr->p_paddr);
6102 #endif
6103 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
6104 error("Destination address too large");
6105 #endif
6106 #ifndef CONFIG_RELOCATABLE
6107 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6108 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6109 error("Wrong destination address");
6110 #endif
6111
6112 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
6113 index 89bbf4e..869908e 100644
6114 --- a/arch/x86/boot/compressed/relocs.c
6115 +++ b/arch/x86/boot/compressed/relocs.c
6116 @@ -13,8 +13,11 @@
6117
6118 static void die(char *fmt, ...);
6119
6120 +#include "../../../../include/generated/autoconf.h"
6121 +
6122 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6123 static Elf32_Ehdr ehdr;
6124 +static Elf32_Phdr *phdr;
6125 static unsigned long reloc_count, reloc_idx;
6126 static unsigned long *relocs;
6127
6128 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
6129 }
6130 }
6131
6132 +static void read_phdrs(FILE *fp)
6133 +{
6134 + unsigned int i;
6135 +
6136 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6137 + if (!phdr) {
6138 + die("Unable to allocate %d program headers\n",
6139 + ehdr.e_phnum);
6140 + }
6141 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6142 + die("Seek to %d failed: %s\n",
6143 + ehdr.e_phoff, strerror(errno));
6144 + }
6145 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6146 + die("Cannot read ELF program headers: %s\n",
6147 + strerror(errno));
6148 + }
6149 + for(i = 0; i < ehdr.e_phnum; i++) {
6150 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6151 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6152 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6153 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6154 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6155 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6156 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6157 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6158 + }
6159 +
6160 +}
6161 +
6162 static void read_shdrs(FILE *fp)
6163 {
6164 - int i;
6165 + unsigned int i;
6166 Elf32_Shdr shdr;
6167
6168 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6169 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
6170
6171 static void read_strtabs(FILE *fp)
6172 {
6173 - int i;
6174 + unsigned int i;
6175 for (i = 0; i < ehdr.e_shnum; i++) {
6176 struct section *sec = &secs[i];
6177 if (sec->shdr.sh_type != SHT_STRTAB) {
6178 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
6179
6180 static void read_symtabs(FILE *fp)
6181 {
6182 - int i,j;
6183 + unsigned int i,j;
6184 for (i = 0; i < ehdr.e_shnum; i++) {
6185 struct section *sec = &secs[i];
6186 if (sec->shdr.sh_type != SHT_SYMTAB) {
6187 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
6188
6189 static void read_relocs(FILE *fp)
6190 {
6191 - int i,j;
6192 + unsigned int i,j;
6193 + uint32_t base;
6194 +
6195 for (i = 0; i < ehdr.e_shnum; i++) {
6196 struct section *sec = &secs[i];
6197 if (sec->shdr.sh_type != SHT_REL) {
6198 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
6199 die("Cannot read symbol table: %s\n",
6200 strerror(errno));
6201 }
6202 + base = 0;
6203 + for (j = 0; j < ehdr.e_phnum; j++) {
6204 + if (phdr[j].p_type != PT_LOAD )
6205 + continue;
6206 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6207 + continue;
6208 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6209 + break;
6210 + }
6211 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6212 Elf32_Rel *rel = &sec->reltab[j];
6213 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6214 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6215 rel->r_info = elf32_to_cpu(rel->r_info);
6216 }
6217 }
6218 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
6219
6220 static void print_absolute_symbols(void)
6221 {
6222 - int i;
6223 + unsigned int i;
6224 printf("Absolute symbols\n");
6225 printf(" Num: Value Size Type Bind Visibility Name\n");
6226 for (i = 0; i < ehdr.e_shnum; i++) {
6227 struct section *sec = &secs[i];
6228 char *sym_strtab;
6229 Elf32_Sym *sh_symtab;
6230 - int j;
6231 + unsigned int j;
6232
6233 if (sec->shdr.sh_type != SHT_SYMTAB) {
6234 continue;
6235 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
6236
6237 static void print_absolute_relocs(void)
6238 {
6239 - int i, printed = 0;
6240 + unsigned int i, printed = 0;
6241
6242 for (i = 0; i < ehdr.e_shnum; i++) {
6243 struct section *sec = &secs[i];
6244 struct section *sec_applies, *sec_symtab;
6245 char *sym_strtab;
6246 Elf32_Sym *sh_symtab;
6247 - int j;
6248 + unsigned int j;
6249 if (sec->shdr.sh_type != SHT_REL) {
6250 continue;
6251 }
6252 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6253
6254 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6255 {
6256 - int i;
6257 + unsigned int i;
6258 /* Walk through the relocations */
6259 for (i = 0; i < ehdr.e_shnum; i++) {
6260 char *sym_strtab;
6261 Elf32_Sym *sh_symtab;
6262 struct section *sec_applies, *sec_symtab;
6263 - int j;
6264 + unsigned int j;
6265 struct section *sec = &secs[i];
6266
6267 if (sec->shdr.sh_type != SHT_REL) {
6268 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6269 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6270 continue;
6271 }
6272 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6273 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6274 + continue;
6275 +
6276 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6277 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6278 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6279 + continue;
6280 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6281 + continue;
6282 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6283 + continue;
6284 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6285 + continue;
6286 +#endif
6287 +
6288 switch (r_type) {
6289 case R_386_NONE:
6290 case R_386_PC32:
6291 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6292
6293 static void emit_relocs(int as_text)
6294 {
6295 - int i;
6296 + unsigned int i;
6297 /* Count how many relocations I have and allocate space for them. */
6298 reloc_count = 0;
6299 walk_relocs(count_reloc);
6300 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
6301 fname, strerror(errno));
6302 }
6303 read_ehdr(fp);
6304 + read_phdrs(fp);
6305 read_shdrs(fp);
6306 read_strtabs(fp);
6307 read_symtabs(fp);
6308 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6309 index 4d3ff03..e4972ff 100644
6310 --- a/arch/x86/boot/cpucheck.c
6311 +++ b/arch/x86/boot/cpucheck.c
6312 @@ -74,7 +74,7 @@ static int has_fpu(void)
6313 u16 fcw = -1, fsw = -1;
6314 u32 cr0;
6315
6316 - asm("movl %%cr0,%0" : "=r" (cr0));
6317 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6318 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6319 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6320 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6321 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6322 {
6323 u32 f0, f1;
6324
6325 - asm("pushfl ; "
6326 + asm volatile("pushfl ; "
6327 "pushfl ; "
6328 "popl %0 ; "
6329 "movl %0,%1 ; "
6330 @@ -115,7 +115,7 @@ static void get_flags(void)
6331 set_bit(X86_FEATURE_FPU, cpu.flags);
6332
6333 if (has_eflag(X86_EFLAGS_ID)) {
6334 - asm("cpuid"
6335 + asm volatile("cpuid"
6336 : "=a" (max_intel_level),
6337 "=b" (cpu_vendor[0]),
6338 "=d" (cpu_vendor[1]),
6339 @@ -124,7 +124,7 @@ static void get_flags(void)
6340
6341 if (max_intel_level >= 0x00000001 &&
6342 max_intel_level <= 0x0000ffff) {
6343 - asm("cpuid"
6344 + asm volatile("cpuid"
6345 : "=a" (tfms),
6346 "=c" (cpu.flags[4]),
6347 "=d" (cpu.flags[0])
6348 @@ -136,7 +136,7 @@ static void get_flags(void)
6349 cpu.model += ((tfms >> 16) & 0xf) << 4;
6350 }
6351
6352 - asm("cpuid"
6353 + asm volatile("cpuid"
6354 : "=a" (max_amd_level)
6355 : "a" (0x80000000)
6356 : "ebx", "ecx", "edx");
6357 @@ -144,7 +144,7 @@ static void get_flags(void)
6358 if (max_amd_level >= 0x80000001 &&
6359 max_amd_level <= 0x8000ffff) {
6360 u32 eax = 0x80000001;
6361 - asm("cpuid"
6362 + asm volatile("cpuid"
6363 : "+a" (eax),
6364 "=c" (cpu.flags[6]),
6365 "=d" (cpu.flags[1])
6366 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6367 u32 ecx = MSR_K7_HWCR;
6368 u32 eax, edx;
6369
6370 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6371 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6372 eax &= ~(1 << 15);
6373 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6374 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6375
6376 get_flags(); /* Make sure it really did something */
6377 err = check_flags();
6378 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6379 u32 ecx = MSR_VIA_FCR;
6380 u32 eax, edx;
6381
6382 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6383 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6384 eax |= (1<<1)|(1<<7);
6385 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6386 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6387
6388 set_bit(X86_FEATURE_CX8, cpu.flags);
6389 err = check_flags();
6390 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6391 u32 eax, edx;
6392 u32 level = 1;
6393
6394 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6395 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6396 - asm("cpuid"
6397 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6398 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6399 + asm volatile("cpuid"
6400 : "+a" (level), "=d" (cpu.flags[0])
6401 : : "ecx", "ebx");
6402 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6403 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6404
6405 err = check_flags();
6406 }
6407 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6408 index bdb4d45..0476680 100644
6409 --- a/arch/x86/boot/header.S
6410 +++ b/arch/x86/boot/header.S
6411 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6412 # single linked list of
6413 # struct setup_data
6414
6415 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6416 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6417
6418 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6419 #define VO_INIT_SIZE (VO__end - VO__text)
6420 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6421 index db75d07..8e6d0af 100644
6422 --- a/arch/x86/boot/memory.c
6423 +++ b/arch/x86/boot/memory.c
6424 @@ -19,7 +19,7 @@
6425
6426 static int detect_memory_e820(void)
6427 {
6428 - int count = 0;
6429 + unsigned int count = 0;
6430 struct biosregs ireg, oreg;
6431 struct e820entry *desc = boot_params.e820_map;
6432 static struct e820entry buf; /* static so it is zeroed */
6433 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6434 index 11e8c6e..fdbb1ed 100644
6435 --- a/arch/x86/boot/video-vesa.c
6436 +++ b/arch/x86/boot/video-vesa.c
6437 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6438
6439 boot_params.screen_info.vesapm_seg = oreg.es;
6440 boot_params.screen_info.vesapm_off = oreg.di;
6441 + boot_params.screen_info.vesapm_size = oreg.cx;
6442 }
6443
6444 /*
6445 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6446 index 43eda28..5ab5fdb 100644
6447 --- a/arch/x86/boot/video.c
6448 +++ b/arch/x86/boot/video.c
6449 @@ -96,7 +96,7 @@ static void store_mode_params(void)
6450 static unsigned int get_entry(void)
6451 {
6452 char entry_buf[4];
6453 - int i, len = 0;
6454 + unsigned int i, len = 0;
6455 int key;
6456 unsigned int v;
6457
6458 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6459 index 5b577d5..3c1fed4 100644
6460 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
6461 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6462 @@ -8,6 +8,8 @@
6463 * including this sentence is retained in full.
6464 */
6465
6466 +#include <asm/alternative-asm.h>
6467 +
6468 .extern crypto_ft_tab
6469 .extern crypto_it_tab
6470 .extern crypto_fl_tab
6471 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6472 je B192; \
6473 leaq 32(r9),r9;
6474
6475 +#define ret pax_force_retaddr 0, 1; ret
6476 +
6477 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6478 movq r1,r2; \
6479 movq r3,r4; \
6480 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6481 index be6d9e3..21fbbca 100644
6482 --- a/arch/x86/crypto/aesni-intel_asm.S
6483 +++ b/arch/x86/crypto/aesni-intel_asm.S
6484 @@ -31,6 +31,7 @@
6485
6486 #include <linux/linkage.h>
6487 #include <asm/inst.h>
6488 +#include <asm/alternative-asm.h>
6489
6490 #ifdef __x86_64__
6491 .data
6492 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6493 pop %r14
6494 pop %r13
6495 pop %r12
6496 + pax_force_retaddr 0, 1
6497 ret
6498 +ENDPROC(aesni_gcm_dec)
6499
6500
6501 /*****************************************************************************
6502 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6503 pop %r14
6504 pop %r13
6505 pop %r12
6506 + pax_force_retaddr 0, 1
6507 ret
6508 +ENDPROC(aesni_gcm_enc)
6509
6510 #endif
6511
6512 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
6513 pxor %xmm1, %xmm0
6514 movaps %xmm0, (TKEYP)
6515 add $0x10, TKEYP
6516 + pax_force_retaddr_bts
6517 ret
6518
6519 .align 4
6520 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
6521 shufps $0b01001110, %xmm2, %xmm1
6522 movaps %xmm1, 0x10(TKEYP)
6523 add $0x20, TKEYP
6524 + pax_force_retaddr_bts
6525 ret
6526
6527 .align 4
6528 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
6529
6530 movaps %xmm0, (TKEYP)
6531 add $0x10, TKEYP
6532 + pax_force_retaddr_bts
6533 ret
6534
6535 .align 4
6536 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
6537 pxor %xmm1, %xmm2
6538 movaps %xmm2, (TKEYP)
6539 add $0x10, TKEYP
6540 + pax_force_retaddr_bts
6541 ret
6542
6543 /*
6544 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6545 #ifndef __x86_64__
6546 popl KEYP
6547 #endif
6548 + pax_force_retaddr 0, 1
6549 ret
6550 +ENDPROC(aesni_set_key)
6551
6552 /*
6553 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6554 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6555 popl KLEN
6556 popl KEYP
6557 #endif
6558 + pax_force_retaddr 0, 1
6559 ret
6560 +ENDPROC(aesni_enc)
6561
6562 /*
6563 * _aesni_enc1: internal ABI
6564 @@ -1959,6 +1972,7 @@ _aesni_enc1:
6565 AESENC KEY STATE
6566 movaps 0x70(TKEYP), KEY
6567 AESENCLAST KEY STATE
6568 + pax_force_retaddr_bts
6569 ret
6570
6571 /*
6572 @@ -2067,6 +2081,7 @@ _aesni_enc4:
6573 AESENCLAST KEY STATE2
6574 AESENCLAST KEY STATE3
6575 AESENCLAST KEY STATE4
6576 + pax_force_retaddr_bts
6577 ret
6578
6579 /*
6580 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6581 popl KLEN
6582 popl KEYP
6583 #endif
6584 + pax_force_retaddr 0, 1
6585 ret
6586 +ENDPROC(aesni_dec)
6587
6588 /*
6589 * _aesni_dec1: internal ABI
6590 @@ -2146,6 +2163,7 @@ _aesni_dec1:
6591 AESDEC KEY STATE
6592 movaps 0x70(TKEYP), KEY
6593 AESDECLAST KEY STATE
6594 + pax_force_retaddr_bts
6595 ret
6596
6597 /*
6598 @@ -2254,6 +2272,7 @@ _aesni_dec4:
6599 AESDECLAST KEY STATE2
6600 AESDECLAST KEY STATE3
6601 AESDECLAST KEY STATE4
6602 + pax_force_retaddr_bts
6603 ret
6604
6605 /*
6606 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6607 popl KEYP
6608 popl LEN
6609 #endif
6610 + pax_force_retaddr 0, 1
6611 ret
6612 +ENDPROC(aesni_ecb_enc)
6613
6614 /*
6615 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6616 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6617 popl KEYP
6618 popl LEN
6619 #endif
6620 + pax_force_retaddr 0, 1
6621 ret
6622 +ENDPROC(aesni_ecb_dec)
6623
6624 /*
6625 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6626 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6627 popl LEN
6628 popl IVP
6629 #endif
6630 + pax_force_retaddr 0, 1
6631 ret
6632 +ENDPROC(aesni_cbc_enc)
6633
6634 /*
6635 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6636 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6637 popl LEN
6638 popl IVP
6639 #endif
6640 + pax_force_retaddr 0, 1
6641 ret
6642 +ENDPROC(aesni_cbc_dec)
6643
6644 #ifdef __x86_64__
6645 .align 16
6646 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
6647 mov $1, TCTR_LOW
6648 MOVQ_R64_XMM TCTR_LOW INC
6649 MOVQ_R64_XMM CTR TCTR_LOW
6650 + pax_force_retaddr_bts
6651 ret
6652
6653 /*
6654 @@ -2552,6 +2580,7 @@ _aesni_inc:
6655 .Linc_low:
6656 movaps CTR, IV
6657 PSHUFB_XMM BSWAP_MASK IV
6658 + pax_force_retaddr_bts
6659 ret
6660
6661 /*
6662 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6663 .Lctr_enc_ret:
6664 movups IV, (IVP)
6665 .Lctr_enc_just_ret:
6666 + pax_force_retaddr 0, 1
6667 ret
6668 +ENDPROC(aesni_ctr_enc)
6669 #endif
6670 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6671 index 391d245..67f35c2 100644
6672 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
6673 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6674 @@ -20,6 +20,8 @@
6675 *
6676 */
6677
6678 +#include <asm/alternative-asm.h>
6679 +
6680 .file "blowfish-x86_64-asm.S"
6681 .text
6682
6683 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
6684 jnz __enc_xor;
6685
6686 write_block();
6687 + pax_force_retaddr 0, 1
6688 ret;
6689 __enc_xor:
6690 xor_block();
6691 + pax_force_retaddr 0, 1
6692 ret;
6693
6694 .align 8
6695 @@ -188,6 +192,7 @@ blowfish_dec_blk:
6696
6697 movq %r11, %rbp;
6698
6699 + pax_force_retaddr 0, 1
6700 ret;
6701
6702 /**********************************************************************
6703 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
6704
6705 popq %rbx;
6706 popq %rbp;
6707 + pax_force_retaddr 0, 1
6708 ret;
6709
6710 __enc_xor4:
6711 @@ -349,6 +355,7 @@ __enc_xor4:
6712
6713 popq %rbx;
6714 popq %rbp;
6715 + pax_force_retaddr 0, 1
6716 ret;
6717
6718 .align 8
6719 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
6720 popq %rbx;
6721 popq %rbp;
6722
6723 + pax_force_retaddr 0, 1
6724 ret;
6725
6726 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6727 index 6214a9b..1f4fc9a 100644
6728 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6729 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6730 @@ -1,3 +1,5 @@
6731 +#include <asm/alternative-asm.h>
6732 +
6733 # enter ECRYPT_encrypt_bytes
6734 .text
6735 .p2align 5
6736 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6737 add %r11,%rsp
6738 mov %rdi,%rax
6739 mov %rsi,%rdx
6740 + pax_force_retaddr 0, 1
6741 ret
6742 # bytesatleast65:
6743 ._bytesatleast65:
6744 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
6745 add %r11,%rsp
6746 mov %rdi,%rax
6747 mov %rsi,%rdx
6748 + pax_force_retaddr
6749 ret
6750 # enter ECRYPT_ivsetup
6751 .text
6752 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6753 add %r11,%rsp
6754 mov %rdi,%rax
6755 mov %rsi,%rdx
6756 + pax_force_retaddr
6757 ret
6758 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
6759 index b2c2f57..8470cab 100644
6760 --- a/arch/x86/crypto/sha1_ssse3_asm.S
6761 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
6762 @@ -28,6 +28,8 @@
6763 * (at your option) any later version.
6764 */
6765
6766 +#include <asm/alternative-asm.h>
6767 +
6768 #define CTX %rdi // arg1
6769 #define BUF %rsi // arg2
6770 #define CNT %rdx // arg3
6771 @@ -104,6 +106,7 @@
6772 pop %r12
6773 pop %rbp
6774 pop %rbx
6775 + pax_force_retaddr 0, 1
6776 ret
6777
6778 .size \name, .-\name
6779 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6780 index 5b012a2..36d5364 100644
6781 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6782 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6783 @@ -20,6 +20,8 @@
6784 *
6785 */
6786
6787 +#include <asm/alternative-asm.h>
6788 +
6789 .file "twofish-x86_64-asm-3way.S"
6790 .text
6791
6792 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
6793 popq %r13;
6794 popq %r14;
6795 popq %r15;
6796 + pax_force_retaddr 0, 1
6797 ret;
6798
6799 __enc_xor3:
6800 @@ -271,6 +274,7 @@ __enc_xor3:
6801 popq %r13;
6802 popq %r14;
6803 popq %r15;
6804 + pax_force_retaddr 0, 1
6805 ret;
6806
6807 .global twofish_dec_blk_3way
6808 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
6809 popq %r13;
6810 popq %r14;
6811 popq %r15;
6812 + pax_force_retaddr 0, 1
6813 ret;
6814
6815 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6816 index 7bcf3fc..f53832f 100644
6817 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6818 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6819 @@ -21,6 +21,7 @@
6820 .text
6821
6822 #include <asm/asm-offsets.h>
6823 +#include <asm/alternative-asm.h>
6824
6825 #define a_offset 0
6826 #define b_offset 4
6827 @@ -268,6 +269,7 @@ twofish_enc_blk:
6828
6829 popq R1
6830 movq $1,%rax
6831 + pax_force_retaddr 0, 1
6832 ret
6833
6834 twofish_dec_blk:
6835 @@ -319,4 +321,5 @@ twofish_dec_blk:
6836
6837 popq R1
6838 movq $1,%rax
6839 + pax_force_retaddr 0, 1
6840 ret
6841 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6842 index fd84387..0b4af7d 100644
6843 --- a/arch/x86/ia32/ia32_aout.c
6844 +++ b/arch/x86/ia32/ia32_aout.c
6845 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6846 unsigned long dump_start, dump_size;
6847 struct user32 dump;
6848
6849 + memset(&dump, 0, sizeof(dump));
6850 +
6851 fs = get_fs();
6852 set_fs(KERNEL_DS);
6853 has_dumped = 1;
6854 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6855 index 6557769..ef6ae89 100644
6856 --- a/arch/x86/ia32/ia32_signal.c
6857 +++ b/arch/x86/ia32/ia32_signal.c
6858 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6859 }
6860 seg = get_fs();
6861 set_fs(KERNEL_DS);
6862 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6863 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6864 set_fs(seg);
6865 if (ret >= 0 && uoss_ptr) {
6866 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6867 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6868 */
6869 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6870 size_t frame_size,
6871 - void **fpstate)
6872 + void __user **fpstate)
6873 {
6874 unsigned long sp;
6875
6876 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6877
6878 if (used_math()) {
6879 sp = sp - sig_xstate_ia32_size;
6880 - *fpstate = (struct _fpstate_ia32 *) sp;
6881 + *fpstate = (struct _fpstate_ia32 __user *) sp;
6882 if (save_i387_xstate_ia32(*fpstate) < 0)
6883 return (void __user *) -1L;
6884 }
6885 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6886 sp -= frame_size;
6887 /* Align the stack pointer according to the i386 ABI,
6888 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6889 - sp = ((sp + 4) & -16ul) - 4;
6890 + sp = ((sp - 12) & -16ul) - 4;
6891 return (void __user *) sp;
6892 }
6893
6894 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6895 * These are actually not used anymore, but left because some
6896 * gdb versions depend on them as a marker.
6897 */
6898 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6899 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6900 } put_user_catch(err);
6901
6902 if (err)
6903 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6904 0xb8,
6905 __NR_ia32_rt_sigreturn,
6906 0x80cd,
6907 - 0,
6908 + 0
6909 };
6910
6911 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6912 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6913
6914 if (ka->sa.sa_flags & SA_RESTORER)
6915 restorer = ka->sa.sa_restorer;
6916 + else if (current->mm->context.vdso)
6917 + /* Return stub is in 32bit vsyscall page */
6918 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6919 else
6920 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6921 - rt_sigreturn);
6922 + restorer = &frame->retcode;
6923 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6924
6925 /*
6926 * Not actually used anymore, but left because some gdb
6927 * versions need it.
6928 */
6929 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6930 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6931 } put_user_catch(err);
6932
6933 if (err)
6934 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6935 index a6253ec..4ad2120 100644
6936 --- a/arch/x86/ia32/ia32entry.S
6937 +++ b/arch/x86/ia32/ia32entry.S
6938 @@ -13,7 +13,9 @@
6939 #include <asm/thread_info.h>
6940 #include <asm/segment.h>
6941 #include <asm/irqflags.h>
6942 +#include <asm/pgtable.h>
6943 #include <linux/linkage.h>
6944 +#include <asm/alternative-asm.h>
6945
6946 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6947 #include <linux/elf-em.h>
6948 @@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
6949 ENDPROC(native_irq_enable_sysexit)
6950 #endif
6951
6952 + .macro pax_enter_kernel_user
6953 + pax_set_fptr_mask
6954 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6955 + call pax_enter_kernel_user
6956 +#endif
6957 + .endm
6958 +
6959 + .macro pax_exit_kernel_user
6960 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6961 + call pax_exit_kernel_user
6962 +#endif
6963 +#ifdef CONFIG_PAX_RANDKSTACK
6964 + pushq %rax
6965 + pushq %r11
6966 + call pax_randomize_kstack
6967 + popq %r11
6968 + popq %rax
6969 +#endif
6970 + .endm
6971 +
6972 +.macro pax_erase_kstack
6973 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6974 + call pax_erase_kstack
6975 +#endif
6976 +.endm
6977 +
6978 /*
6979 * 32bit SYSENTER instruction entry.
6980 *
6981 @@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
6982 CFI_REGISTER rsp,rbp
6983 SWAPGS_UNSAFE_STACK
6984 movq PER_CPU_VAR(kernel_stack), %rsp
6985 - addq $(KERNEL_STACK_OFFSET),%rsp
6986 - /*
6987 - * No need to follow this irqs on/off section: the syscall
6988 - * disabled irqs, here we enable it straight after entry:
6989 - */
6990 - ENABLE_INTERRUPTS(CLBR_NONE)
6991 movl %ebp,%ebp /* zero extension */
6992 pushq_cfi $__USER32_DS
6993 /*CFI_REL_OFFSET ss,0*/
6994 @@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
6995 CFI_REL_OFFSET rsp,0
6996 pushfq_cfi
6997 /*CFI_REL_OFFSET rflags,0*/
6998 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6999 - CFI_REGISTER rip,r10
7000 + orl $X86_EFLAGS_IF,(%rsp)
7001 + GET_THREAD_INFO(%r11)
7002 + movl TI_sysenter_return(%r11), %r11d
7003 + CFI_REGISTER rip,r11
7004 pushq_cfi $__USER32_CS
7005 /*CFI_REL_OFFSET cs,0*/
7006 movl %eax, %eax
7007 - pushq_cfi %r10
7008 + pushq_cfi %r11
7009 CFI_REL_OFFSET rip,0
7010 pushq_cfi %rax
7011 cld
7012 SAVE_ARGS 0,1,0
7013 + pax_enter_kernel_user
7014 + /*
7015 + * No need to follow this irqs on/off section: the syscall
7016 + * disabled irqs, here we enable it straight after entry:
7017 + */
7018 + ENABLE_INTERRUPTS(CLBR_NONE)
7019 /* no need to do an access_ok check here because rbp has been
7020 32bit zero extended */
7021 +
7022 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7023 + mov $PAX_USER_SHADOW_BASE,%r11
7024 + add %r11,%rbp
7025 +#endif
7026 +
7027 1: movl (%rbp),%ebp
7028 .section __ex_table,"a"
7029 .quad 1b,ia32_badarg
7030 .previous
7031 - GET_THREAD_INFO(%r10)
7032 - orl $TS_COMPAT,TI_status(%r10)
7033 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7034 + GET_THREAD_INFO(%r11)
7035 + orl $TS_COMPAT,TI_status(%r11)
7036 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7037 CFI_REMEMBER_STATE
7038 jnz sysenter_tracesys
7039 cmpq $(IA32_NR_syscalls-1),%rax
7040 @@ -162,13 +198,15 @@ sysenter_do_call:
7041 sysenter_dispatch:
7042 call *ia32_sys_call_table(,%rax,8)
7043 movq %rax,RAX-ARGOFFSET(%rsp)
7044 - GET_THREAD_INFO(%r10)
7045 + GET_THREAD_INFO(%r11)
7046 DISABLE_INTERRUPTS(CLBR_NONE)
7047 TRACE_IRQS_OFF
7048 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7049 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7050 jnz sysexit_audit
7051 sysexit_from_sys_call:
7052 - andl $~TS_COMPAT,TI_status(%r10)
7053 + pax_exit_kernel_user
7054 + pax_erase_kstack
7055 + andl $~TS_COMPAT,TI_status(%r11)
7056 /* clear IF, that popfq doesn't enable interrupts early */
7057 andl $~0x200,EFLAGS-R11(%rsp)
7058 movl RIP-R11(%rsp),%edx /* User %eip */
7059 @@ -194,6 +232,9 @@ sysexit_from_sys_call:
7060 movl %eax,%esi /* 2nd arg: syscall number */
7061 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
7062 call audit_syscall_entry
7063 +
7064 + pax_erase_kstack
7065 +
7066 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
7067 cmpq $(IA32_NR_syscalls-1),%rax
7068 ja ia32_badsys
7069 @@ -205,7 +246,7 @@ sysexit_from_sys_call:
7070 .endm
7071
7072 .macro auditsys_exit exit
7073 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7074 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7075 jnz ia32_ret_from_sys_call
7076 TRACE_IRQS_ON
7077 sti
7078 @@ -215,12 +256,12 @@ sysexit_from_sys_call:
7079 movzbl %al,%edi /* zero-extend that into %edi */
7080 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
7081 call audit_syscall_exit
7082 - GET_THREAD_INFO(%r10)
7083 + GET_THREAD_INFO(%r11)
7084 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
7085 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
7086 cli
7087 TRACE_IRQS_OFF
7088 - testl %edi,TI_flags(%r10)
7089 + testl %edi,TI_flags(%r11)
7090 jz \exit
7091 CLEAR_RREGS -ARGOFFSET
7092 jmp int_with_check
7093 @@ -238,7 +279,7 @@ sysexit_audit:
7094
7095 sysenter_tracesys:
7096 #ifdef CONFIG_AUDITSYSCALL
7097 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7098 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7099 jz sysenter_auditsys
7100 #endif
7101 SAVE_REST
7102 @@ -246,6 +287,9 @@ sysenter_tracesys:
7103 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
7104 movq %rsp,%rdi /* &pt_regs -> arg1 */
7105 call syscall_trace_enter
7106 +
7107 + pax_erase_kstack
7108 +
7109 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
7110 RESTORE_REST
7111 cmpq $(IA32_NR_syscalls-1),%rax
7112 @@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
7113 ENTRY(ia32_cstar_target)
7114 CFI_STARTPROC32 simple
7115 CFI_SIGNAL_FRAME
7116 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
7117 + CFI_DEF_CFA rsp,0
7118 CFI_REGISTER rip,rcx
7119 /*CFI_REGISTER rflags,r11*/
7120 SWAPGS_UNSAFE_STACK
7121 movl %esp,%r8d
7122 CFI_REGISTER rsp,r8
7123 movq PER_CPU_VAR(kernel_stack),%rsp
7124 + SAVE_ARGS 8*6,0,0
7125 + pax_enter_kernel_user
7126 /*
7127 * No need to follow this irqs on/off section: the syscall
7128 * disabled irqs and here we enable it straight after entry:
7129 */
7130 ENABLE_INTERRUPTS(CLBR_NONE)
7131 - SAVE_ARGS 8,0,0
7132 movl %eax,%eax /* zero extension */
7133 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
7134 movq %rcx,RIP-ARGOFFSET(%rsp)
7135 @@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
7136 /* no need to do an access_ok check here because r8 has been
7137 32bit zero extended */
7138 /* hardware stack frame is complete now */
7139 +
7140 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7141 + mov $PAX_USER_SHADOW_BASE,%r11
7142 + add %r11,%r8
7143 +#endif
7144 +
7145 1: movl (%r8),%r9d
7146 .section __ex_table,"a"
7147 .quad 1b,ia32_badarg
7148 .previous
7149 - GET_THREAD_INFO(%r10)
7150 - orl $TS_COMPAT,TI_status(%r10)
7151 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7152 + GET_THREAD_INFO(%r11)
7153 + orl $TS_COMPAT,TI_status(%r11)
7154 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7155 CFI_REMEMBER_STATE
7156 jnz cstar_tracesys
7157 cmpq $IA32_NR_syscalls-1,%rax
7158 @@ -321,13 +372,15 @@ cstar_do_call:
7159 cstar_dispatch:
7160 call *ia32_sys_call_table(,%rax,8)
7161 movq %rax,RAX-ARGOFFSET(%rsp)
7162 - GET_THREAD_INFO(%r10)
7163 + GET_THREAD_INFO(%r11)
7164 DISABLE_INTERRUPTS(CLBR_NONE)
7165 TRACE_IRQS_OFF
7166 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7167 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7168 jnz sysretl_audit
7169 sysretl_from_sys_call:
7170 - andl $~TS_COMPAT,TI_status(%r10)
7171 + pax_exit_kernel_user
7172 + pax_erase_kstack
7173 + andl $~TS_COMPAT,TI_status(%r11)
7174 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
7175 movl RIP-ARGOFFSET(%rsp),%ecx
7176 CFI_REGISTER rip,rcx
7177 @@ -355,7 +408,7 @@ sysretl_audit:
7178
7179 cstar_tracesys:
7180 #ifdef CONFIG_AUDITSYSCALL
7181 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7182 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7183 jz cstar_auditsys
7184 #endif
7185 xchgl %r9d,%ebp
7186 @@ -364,6 +417,9 @@ cstar_tracesys:
7187 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
7188 movq %rsp,%rdi /* &pt_regs -> arg1 */
7189 call syscall_trace_enter
7190 +
7191 + pax_erase_kstack
7192 +
7193 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
7194 RESTORE_REST
7195 xchgl %ebp,%r9d
7196 @@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
7197 CFI_REL_OFFSET rip,RIP-RIP
7198 PARAVIRT_ADJUST_EXCEPTION_FRAME
7199 SWAPGS
7200 - /*
7201 - * No need to follow this irqs on/off section: the syscall
7202 - * disabled irqs and here we enable it straight after entry:
7203 - */
7204 - ENABLE_INTERRUPTS(CLBR_NONE)
7205 movl %eax,%eax
7206 pushq_cfi %rax
7207 cld
7208 /* note the registers are not zero extended to the sf.
7209 this could be a problem. */
7210 SAVE_ARGS 0,1,0
7211 - GET_THREAD_INFO(%r10)
7212 - orl $TS_COMPAT,TI_status(%r10)
7213 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7214 + pax_enter_kernel_user
7215 + /*
7216 + * No need to follow this irqs on/off section: the syscall
7217 + * disabled irqs and here we enable it straight after entry:
7218 + */
7219 + ENABLE_INTERRUPTS(CLBR_NONE)
7220 + GET_THREAD_INFO(%r11)
7221 + orl $TS_COMPAT,TI_status(%r11)
7222 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7223 jnz ia32_tracesys
7224 cmpq $(IA32_NR_syscalls-1),%rax
7225 ja ia32_badsys
7226 @@ -441,6 +498,9 @@ ia32_tracesys:
7227 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
7228 movq %rsp,%rdi /* &pt_regs -> arg1 */
7229 call syscall_trace_enter
7230 +
7231 + pax_erase_kstack
7232 +
7233 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
7234 RESTORE_REST
7235 cmpq $(IA32_NR_syscalls-1),%rax
7236 @@ -455,6 +515,7 @@ ia32_badsys:
7237
7238 quiet_ni_syscall:
7239 movq $-ENOSYS,%rax
7240 + pax_force_retaddr
7241 ret
7242 CFI_ENDPROC
7243
7244 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
7245 index f6f5c53..b358b28 100644
7246 --- a/arch/x86/ia32/sys_ia32.c
7247 +++ b/arch/x86/ia32/sys_ia32.c
7248 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
7249 */
7250 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
7251 {
7252 - typeof(ubuf->st_uid) uid = 0;
7253 - typeof(ubuf->st_gid) gid = 0;
7254 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
7255 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
7256 SET_UID(uid, stat->uid);
7257 SET_GID(gid, stat->gid);
7258 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
7259 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
7260 }
7261 set_fs(KERNEL_DS);
7262 ret = sys_rt_sigprocmask(how,
7263 - set ? (sigset_t __user *)&s : NULL,
7264 - oset ? (sigset_t __user *)&s : NULL,
7265 + set ? (sigset_t __force_user *)&s : NULL,
7266 + oset ? (sigset_t __force_user *)&s : NULL,
7267 sigsetsize);
7268 set_fs(old_fs);
7269 if (ret)
7270 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
7271 return alarm_setitimer(seconds);
7272 }
7273
7274 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
7275 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
7276 int options)
7277 {
7278 return compat_sys_wait4(pid, stat_addr, options, NULL);
7279 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
7280 mm_segment_t old_fs = get_fs();
7281
7282 set_fs(KERNEL_DS);
7283 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
7284 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
7285 set_fs(old_fs);
7286 if (put_compat_timespec(&t, interval))
7287 return -EFAULT;
7288 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
7289 mm_segment_t old_fs = get_fs();
7290
7291 set_fs(KERNEL_DS);
7292 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
7293 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
7294 set_fs(old_fs);
7295 if (!ret) {
7296 switch (_NSIG_WORDS) {
7297 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
7298 if (copy_siginfo_from_user32(&info, uinfo))
7299 return -EFAULT;
7300 set_fs(KERNEL_DS);
7301 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
7302 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
7303 set_fs(old_fs);
7304 return ret;
7305 }
7306 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7307 return -EFAULT;
7308
7309 set_fs(KERNEL_DS);
7310 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7311 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7312 count);
7313 set_fs(old_fs);
7314
7315 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7316 index 091508b..7692c6f 100644
7317 --- a/arch/x86/include/asm/alternative-asm.h
7318 +++ b/arch/x86/include/asm/alternative-asm.h
7319 @@ -4,10 +4,10 @@
7320
7321 #ifdef CONFIG_SMP
7322 .macro LOCK_PREFIX
7323 -1: lock
7324 +672: lock
7325 .section .smp_locks,"a"
7326 .balign 4
7327 - .long 1b - .
7328 + .long 672b - .
7329 .previous
7330 .endm
7331 #else
7332 @@ -15,6 +15,45 @@
7333 .endm
7334 #endif
7335
7336 +#ifdef KERNEXEC_PLUGIN
7337 + .macro pax_force_retaddr_bts rip=0
7338 + btsq $63,\rip(%rsp)
7339 + .endm
7340 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7341 + .macro pax_force_retaddr rip=0, reload=0
7342 + btsq $63,\rip(%rsp)
7343 + .endm
7344 + .macro pax_force_fptr ptr
7345 + btsq $63,\ptr
7346 + .endm
7347 + .macro pax_set_fptr_mask
7348 + .endm
7349 +#endif
7350 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7351 + .macro pax_force_retaddr rip=0, reload=0
7352 + .if \reload
7353 + pax_set_fptr_mask
7354 + .endif
7355 + orq %r10,\rip(%rsp)
7356 + .endm
7357 + .macro pax_force_fptr ptr
7358 + orq %r10,\ptr
7359 + .endm
7360 + .macro pax_set_fptr_mask
7361 + movabs $0x8000000000000000,%r10
7362 + .endm
7363 +#endif
7364 +#else
7365 + .macro pax_force_retaddr rip=0, reload=0
7366 + .endm
7367 + .macro pax_force_fptr ptr
7368 + .endm
7369 + .macro pax_force_retaddr_bts rip=0
7370 + .endm
7371 + .macro pax_set_fptr_mask
7372 + .endm
7373 +#endif
7374 +
7375 .macro altinstruction_entry orig alt feature orig_len alt_len
7376 .long \orig - .
7377 .long \alt - .
7378 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7379 index 37ad100..7d47faa 100644
7380 --- a/arch/x86/include/asm/alternative.h
7381 +++ b/arch/x86/include/asm/alternative.h
7382 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7383 ".section .discard,\"aw\",@progbits\n" \
7384 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7385 ".previous\n" \
7386 - ".section .altinstr_replacement, \"ax\"\n" \
7387 + ".section .altinstr_replacement, \"a\"\n" \
7388 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7389 ".previous"
7390
7391 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7392 index 1a6c09a..fec2432 100644
7393 --- a/arch/x86/include/asm/apic.h
7394 +++ b/arch/x86/include/asm/apic.h
7395 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7396
7397 #ifdef CONFIG_X86_LOCAL_APIC
7398
7399 -extern unsigned int apic_verbosity;
7400 +extern int apic_verbosity;
7401 extern int local_apic_timer_c2_ok;
7402
7403 extern int disable_apic;
7404 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7405 index 20370c6..a2eb9b0 100644
7406 --- a/arch/x86/include/asm/apm.h
7407 +++ b/arch/x86/include/asm/apm.h
7408 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7409 __asm__ __volatile__(APM_DO_ZERO_SEGS
7410 "pushl %%edi\n\t"
7411 "pushl %%ebp\n\t"
7412 - "lcall *%%cs:apm_bios_entry\n\t"
7413 + "lcall *%%ss:apm_bios_entry\n\t"
7414 "setc %%al\n\t"
7415 "popl %%ebp\n\t"
7416 "popl %%edi\n\t"
7417 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7418 __asm__ __volatile__(APM_DO_ZERO_SEGS
7419 "pushl %%edi\n\t"
7420 "pushl %%ebp\n\t"
7421 - "lcall *%%cs:apm_bios_entry\n\t"
7422 + "lcall *%%ss:apm_bios_entry\n\t"
7423 "setc %%bl\n\t"
7424 "popl %%ebp\n\t"
7425 "popl %%edi\n\t"
7426 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7427 index 58cb6d4..ca9010d 100644
7428 --- a/arch/x86/include/asm/atomic.h
7429 +++ b/arch/x86/include/asm/atomic.h
7430 @@ -22,7 +22,18 @@
7431 */
7432 static inline int atomic_read(const atomic_t *v)
7433 {
7434 - return (*(volatile int *)&(v)->counter);
7435 + return (*(volatile const int *)&(v)->counter);
7436 +}
7437 +
7438 +/**
7439 + * atomic_read_unchecked - read atomic variable
7440 + * @v: pointer of type atomic_unchecked_t
7441 + *
7442 + * Atomically reads the value of @v.
7443 + */
7444 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7445 +{
7446 + return (*(volatile const int *)&(v)->counter);
7447 }
7448
7449 /**
7450 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7451 }
7452
7453 /**
7454 + * atomic_set_unchecked - set atomic variable
7455 + * @v: pointer of type atomic_unchecked_t
7456 + * @i: required value
7457 + *
7458 + * Atomically sets the value of @v to @i.
7459 + */
7460 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7461 +{
7462 + v->counter = i;
7463 +}
7464 +
7465 +/**
7466 * atomic_add - add integer to atomic variable
7467 * @i: integer value to add
7468 * @v: pointer of type atomic_t
7469 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7470 */
7471 static inline void atomic_add(int i, atomic_t *v)
7472 {
7473 - asm volatile(LOCK_PREFIX "addl %1,%0"
7474 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7475 +
7476 +#ifdef CONFIG_PAX_REFCOUNT
7477 + "jno 0f\n"
7478 + LOCK_PREFIX "subl %1,%0\n"
7479 + "int $4\n0:\n"
7480 + _ASM_EXTABLE(0b, 0b)
7481 +#endif
7482 +
7483 + : "+m" (v->counter)
7484 + : "ir" (i));
7485 +}
7486 +
7487 +/**
7488 + * atomic_add_unchecked - add integer to atomic variable
7489 + * @i: integer value to add
7490 + * @v: pointer of type atomic_unchecked_t
7491 + *
7492 + * Atomically adds @i to @v.
7493 + */
7494 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7495 +{
7496 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7497 : "+m" (v->counter)
7498 : "ir" (i));
7499 }
7500 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7501 */
7502 static inline void atomic_sub(int i, atomic_t *v)
7503 {
7504 - asm volatile(LOCK_PREFIX "subl %1,%0"
7505 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7506 +
7507 +#ifdef CONFIG_PAX_REFCOUNT
7508 + "jno 0f\n"
7509 + LOCK_PREFIX "addl %1,%0\n"
7510 + "int $4\n0:\n"
7511 + _ASM_EXTABLE(0b, 0b)
7512 +#endif
7513 +
7514 + : "+m" (v->counter)
7515 + : "ir" (i));
7516 +}
7517 +
7518 +/**
7519 + * atomic_sub_unchecked - subtract integer from atomic variable
7520 + * @i: integer value to subtract
7521 + * @v: pointer of type atomic_unchecked_t
7522 + *
7523 + * Atomically subtracts @i from @v.
7524 + */
7525 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7526 +{
7527 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7528 : "+m" (v->counter)
7529 : "ir" (i));
7530 }
7531 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7532 {
7533 unsigned char c;
7534
7535 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7536 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7537 +
7538 +#ifdef CONFIG_PAX_REFCOUNT
7539 + "jno 0f\n"
7540 + LOCK_PREFIX "addl %2,%0\n"
7541 + "int $4\n0:\n"
7542 + _ASM_EXTABLE(0b, 0b)
7543 +#endif
7544 +
7545 + "sete %1\n"
7546 : "+m" (v->counter), "=qm" (c)
7547 : "ir" (i) : "memory");
7548 return c;
7549 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7550 */
7551 static inline void atomic_inc(atomic_t *v)
7552 {
7553 - asm volatile(LOCK_PREFIX "incl %0"
7554 + asm volatile(LOCK_PREFIX "incl %0\n"
7555 +
7556 +#ifdef CONFIG_PAX_REFCOUNT
7557 + "jno 0f\n"
7558 + LOCK_PREFIX "decl %0\n"
7559 + "int $4\n0:\n"
7560 + _ASM_EXTABLE(0b, 0b)
7561 +#endif
7562 +
7563 + : "+m" (v->counter));
7564 +}
7565 +
7566 +/**
7567 + * atomic_inc_unchecked - increment atomic variable
7568 + * @v: pointer of type atomic_unchecked_t
7569 + *
7570 + * Atomically increments @v by 1.
7571 + */
7572 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7573 +{
7574 + asm volatile(LOCK_PREFIX "incl %0\n"
7575 : "+m" (v->counter));
7576 }
7577
7578 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7579 */
7580 static inline void atomic_dec(atomic_t *v)
7581 {
7582 - asm volatile(LOCK_PREFIX "decl %0"
7583 + asm volatile(LOCK_PREFIX "decl %0\n"
7584 +
7585 +#ifdef CONFIG_PAX_REFCOUNT
7586 + "jno 0f\n"
7587 + LOCK_PREFIX "incl %0\n"
7588 + "int $4\n0:\n"
7589 + _ASM_EXTABLE(0b, 0b)
7590 +#endif
7591 +
7592 + : "+m" (v->counter));
7593 +}
7594 +
7595 +/**
7596 + * atomic_dec_unchecked - decrement atomic variable
7597 + * @v: pointer of type atomic_unchecked_t
7598 + *
7599 + * Atomically decrements @v by 1.
7600 + */
7601 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7602 +{
7603 + asm volatile(LOCK_PREFIX "decl %0\n"
7604 : "+m" (v->counter));
7605 }
7606
7607 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7608 {
7609 unsigned char c;
7610
7611 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7612 + asm volatile(LOCK_PREFIX "decl %0\n"
7613 +
7614 +#ifdef CONFIG_PAX_REFCOUNT
7615 + "jno 0f\n"
7616 + LOCK_PREFIX "incl %0\n"
7617 + "int $4\n0:\n"
7618 + _ASM_EXTABLE(0b, 0b)
7619 +#endif
7620 +
7621 + "sete %1\n"
7622 : "+m" (v->counter), "=qm" (c)
7623 : : "memory");
7624 return c != 0;
7625 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7626 {
7627 unsigned char c;
7628
7629 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7630 + asm volatile(LOCK_PREFIX "incl %0\n"
7631 +
7632 +#ifdef CONFIG_PAX_REFCOUNT
7633 + "jno 0f\n"
7634 + LOCK_PREFIX "decl %0\n"
7635 + "int $4\n0:\n"
7636 + _ASM_EXTABLE(0b, 0b)
7637 +#endif
7638 +
7639 + "sete %1\n"
7640 + : "+m" (v->counter), "=qm" (c)
7641 + : : "memory");
7642 + return c != 0;
7643 +}
7644 +
7645 +/**
7646 + * atomic_inc_and_test_unchecked - increment and test
7647 + * @v: pointer of type atomic_unchecked_t
7648 + *
7649 + * Atomically increments @v by 1
7650 + * and returns true if the result is zero, or false for all
7651 + * other cases.
7652 + */
7653 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7654 +{
7655 + unsigned char c;
7656 +
7657 + asm volatile(LOCK_PREFIX "incl %0\n"
7658 + "sete %1\n"
7659 : "+m" (v->counter), "=qm" (c)
7660 : : "memory");
7661 return c != 0;
7662 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7663 {
7664 unsigned char c;
7665
7666 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7667 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7668 +
7669 +#ifdef CONFIG_PAX_REFCOUNT
7670 + "jno 0f\n"
7671 + LOCK_PREFIX "subl %2,%0\n"
7672 + "int $4\n0:\n"
7673 + _ASM_EXTABLE(0b, 0b)
7674 +#endif
7675 +
7676 + "sets %1\n"
7677 : "+m" (v->counter), "=qm" (c)
7678 : "ir" (i) : "memory");
7679 return c;
7680 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
7681 goto no_xadd;
7682 #endif
7683 /* Modern 486+ processor */
7684 - return i + xadd(&v->counter, i);
7685 + return i + xadd_check_overflow(&v->counter, i);
7686
7687 #ifdef CONFIG_M386
7688 no_xadd: /* Legacy 386 processor */
7689 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
7690 }
7691
7692 /**
7693 + * atomic_add_return_unchecked - add integer and return
7694 + * @i: integer value to add
7695 + * @v: pointer of type atomic_unchecked_t
7696 + *
7697 + * Atomically adds @i to @v and returns @i + @v
7698 + */
7699 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7700 +{
7701 +#ifdef CONFIG_M386
7702 + int __i;
7703 + unsigned long flags;
7704 + if (unlikely(boot_cpu_data.x86 <= 3))
7705 + goto no_xadd;
7706 +#endif
7707 + /* Modern 486+ processor */
7708 + return i + xadd(&v->counter, i);
7709 +
7710 +#ifdef CONFIG_M386
7711 +no_xadd: /* Legacy 386 processor */
7712 + raw_local_irq_save(flags);
7713 + __i = atomic_read_unchecked(v);
7714 + atomic_set_unchecked(v, i + __i);
7715 + raw_local_irq_restore(flags);
7716 + return i + __i;
7717 +#endif
7718 +}
7719 +
7720 +/**
7721 * atomic_sub_return - subtract integer and return
7722 * @v: pointer of type atomic_t
7723 * @i: integer value to subtract
7724 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7725 }
7726
7727 #define atomic_inc_return(v) (atomic_add_return(1, v))
7728 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7729 +{
7730 + return atomic_add_return_unchecked(1, v);
7731 +}
7732 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7733
7734 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7735 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7736 return cmpxchg(&v->counter, old, new);
7737 }
7738
7739 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7740 +{
7741 + return cmpxchg(&v->counter, old, new);
7742 +}
7743 +
7744 static inline int atomic_xchg(atomic_t *v, int new)
7745 {
7746 return xchg(&v->counter, new);
7747 }
7748
7749 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7750 +{
7751 + return xchg(&v->counter, new);
7752 +}
7753 +
7754 /**
7755 * __atomic_add_unless - add unless the number is already a given value
7756 * @v: pointer of type atomic_t
7757 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7758 */
7759 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7760 {
7761 - int c, old;
7762 + int c, old, new;
7763 c = atomic_read(v);
7764 for (;;) {
7765 - if (unlikely(c == (u)))
7766 + if (unlikely(c == u))
7767 break;
7768 - old = atomic_cmpxchg((v), c, c + (a));
7769 +
7770 + asm volatile("addl %2,%0\n"
7771 +
7772 +#ifdef CONFIG_PAX_REFCOUNT
7773 + "jno 0f\n"
7774 + "subl %2,%0\n"
7775 + "int $4\n0:\n"
7776 + _ASM_EXTABLE(0b, 0b)
7777 +#endif
7778 +
7779 + : "=r" (new)
7780 + : "0" (c), "ir" (a));
7781 +
7782 + old = atomic_cmpxchg(v, c, new);
7783 if (likely(old == c))
7784 break;
7785 c = old;
7786 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7787 return c;
7788 }
7789
7790 +/**
7791 + * atomic_inc_not_zero_hint - increment if not null
7792 + * @v: pointer of type atomic_t
7793 + * @hint: probable value of the atomic before the increment
7794 + *
7795 + * This version of atomic_inc_not_zero() gives a hint of probable
7796 + * value of the atomic. This helps processor to not read the memory
7797 + * before doing the atomic read/modify/write cycle, lowering
7798 + * number of bus transactions on some arches.
7799 + *
7800 + * Returns: 0 if increment was not done, 1 otherwise.
7801 + */
7802 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7803 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7804 +{
7805 + int val, c = hint, new;
7806 +
7807 + /* sanity test, should be removed by compiler if hint is a constant */
7808 + if (!hint)
7809 + return __atomic_add_unless(v, 1, 0);
7810 +
7811 + do {
7812 + asm volatile("incl %0\n"
7813 +
7814 +#ifdef CONFIG_PAX_REFCOUNT
7815 + "jno 0f\n"
7816 + "decl %0\n"
7817 + "int $4\n0:\n"
7818 + _ASM_EXTABLE(0b, 0b)
7819 +#endif
7820 +
7821 + : "=r" (new)
7822 + : "0" (c));
7823 +
7824 + val = atomic_cmpxchg(v, c, new);
7825 + if (val == c)
7826 + return 1;
7827 + c = val;
7828 + } while (c);
7829 +
7830 + return 0;
7831 +}
7832
7833 /*
7834 * atomic_dec_if_positive - decrement by 1 if old value positive
7835 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7836 index 24098aa..1e37723 100644
7837 --- a/arch/x86/include/asm/atomic64_32.h
7838 +++ b/arch/x86/include/asm/atomic64_32.h
7839 @@ -12,6 +12,14 @@ typedef struct {
7840 u64 __aligned(8) counter;
7841 } atomic64_t;
7842
7843 +#ifdef CONFIG_PAX_REFCOUNT
7844 +typedef struct {
7845 + u64 __aligned(8) counter;
7846 +} atomic64_unchecked_t;
7847 +#else
7848 +typedef atomic64_t atomic64_unchecked_t;
7849 +#endif
7850 +
7851 #define ATOMIC64_INIT(val) { (val) }
7852
7853 #ifdef CONFIG_X86_CMPXCHG64
7854 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7855 }
7856
7857 /**
7858 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7859 + * @p: pointer to type atomic64_unchecked_t
7860 + * @o: expected value
7861 + * @n: new value
7862 + *
7863 + * Atomically sets @v to @n if it was equal to @o and returns
7864 + * the old value.
7865 + */
7866 +
7867 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7868 +{
7869 + return cmpxchg64(&v->counter, o, n);
7870 +}
7871 +
7872 +/**
7873 * atomic64_xchg - xchg atomic64 variable
7874 * @v: pointer to type atomic64_t
7875 * @n: value to assign
7876 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7877 }
7878
7879 /**
7880 + * atomic64_set_unchecked - set atomic64 variable
7881 + * @v: pointer to type atomic64_unchecked_t
7882 + * @n: value to assign
7883 + *
7884 + * Atomically sets the value of @v to @n.
7885 + */
7886 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7887 +{
7888 + unsigned high = (unsigned)(i >> 32);
7889 + unsigned low = (unsigned)i;
7890 + asm volatile(ATOMIC64_ALTERNATIVE(set)
7891 + : "+b" (low), "+c" (high)
7892 + : "S" (v)
7893 + : "eax", "edx", "memory"
7894 + );
7895 +}
7896 +
7897 +/**
7898 * atomic64_read - read atomic64 variable
7899 * @v: pointer to type atomic64_t
7900 *
7901 @@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7902 }
7903
7904 /**
7905 + * atomic64_read_unchecked - read atomic64 variable
7906 + * @v: pointer to type atomic64_unchecked_t
7907 + *
7908 + * Atomically reads the value of @v and returns it.
7909 + */
7910 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7911 +{
7912 + long long r;
7913 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7914 + : "=A" (r), "+c" (v)
7915 + : : "memory"
7916 + );
7917 + return r;
7918 + }
7919 +
7920 +/**
7921 * atomic64_add_return - add and return
7922 * @i: integer value to add
7923 * @v: pointer to type atomic64_t
7924 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7925 return i;
7926 }
7927
7928 +/**
7929 + * atomic64_add_return_unchecked - add and return
7930 + * @i: integer value to add
7931 + * @v: pointer to type atomic64_unchecked_t
7932 + *
7933 + * Atomically adds @i to @v and returns @i + *@v
7934 + */
7935 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7936 +{
7937 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7938 + : "+A" (i), "+c" (v)
7939 + : : "memory"
7940 + );
7941 + return i;
7942 +}
7943 +
7944 /*
7945 * Other variants with different arithmetic operators:
7946 */
7947 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7948 return a;
7949 }
7950
7951 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7952 +{
7953 + long long a;
7954 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7955 + : "=A" (a)
7956 + : "S" (v)
7957 + : "memory", "ecx"
7958 + );
7959 + return a;
7960 +}
7961 +
7962 static inline long long atomic64_dec_return(atomic64_t *v)
7963 {
7964 long long a;
7965 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7966 }
7967
7968 /**
7969 + * atomic64_add_unchecked - add integer to atomic64 variable
7970 + * @i: integer value to add
7971 + * @v: pointer to type atomic64_unchecked_t
7972 + *
7973 + * Atomically adds @i to @v.
7974 + */
7975 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7976 +{
7977 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7978 + : "+A" (i), "+c" (v)
7979 + : : "memory"
7980 + );
7981 + return i;
7982 +}
7983 +
7984 +/**
7985 * atomic64_sub - subtract the atomic64 variable
7986 * @i: integer value to subtract
7987 * @v: pointer to type atomic64_t
7988 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7989 index 0e1cbfc..5623683 100644
7990 --- a/arch/x86/include/asm/atomic64_64.h
7991 +++ b/arch/x86/include/asm/atomic64_64.h
7992 @@ -18,7 +18,19 @@
7993 */
7994 static inline long atomic64_read(const atomic64_t *v)
7995 {
7996 - return (*(volatile long *)&(v)->counter);
7997 + return (*(volatile const long *)&(v)->counter);
7998 +}
7999 +
8000 +/**
8001 + * atomic64_read_unchecked - read atomic64 variable
8002 + * @v: pointer of type atomic64_unchecked_t
8003 + *
8004 + * Atomically reads the value of @v.
8005 + * Doesn't imply a read memory barrier.
8006 + */
8007 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8008 +{
8009 + return (*(volatile const long *)&(v)->counter);
8010 }
8011
8012 /**
8013 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
8014 }
8015
8016 /**
8017 + * atomic64_set_unchecked - set atomic64 variable
8018 + * @v: pointer to type atomic64_unchecked_t
8019 + * @i: required value
8020 + *
8021 + * Atomically sets the value of @v to @i.
8022 + */
8023 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8024 +{
8025 + v->counter = i;
8026 +}
8027 +
8028 +/**
8029 * atomic64_add - add integer to atomic64 variable
8030 * @i: integer value to add
8031 * @v: pointer to type atomic64_t
8032 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
8033 */
8034 static inline void atomic64_add(long i, atomic64_t *v)
8035 {
8036 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
8037 +
8038 +#ifdef CONFIG_PAX_REFCOUNT
8039 + "jno 0f\n"
8040 + LOCK_PREFIX "subq %1,%0\n"
8041 + "int $4\n0:\n"
8042 + _ASM_EXTABLE(0b, 0b)
8043 +#endif
8044 +
8045 + : "=m" (v->counter)
8046 + : "er" (i), "m" (v->counter));
8047 +}
8048 +
8049 +/**
8050 + * atomic64_add_unchecked - add integer to atomic64 variable
8051 + * @i: integer value to add
8052 + * @v: pointer to type atomic64_unchecked_t
8053 + *
8054 + * Atomically adds @i to @v.
8055 + */
8056 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
8057 +{
8058 asm volatile(LOCK_PREFIX "addq %1,%0"
8059 : "=m" (v->counter)
8060 : "er" (i), "m" (v->counter));
8061 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
8062 */
8063 static inline void atomic64_sub(long i, atomic64_t *v)
8064 {
8065 - asm volatile(LOCK_PREFIX "subq %1,%0"
8066 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
8067 +
8068 +#ifdef CONFIG_PAX_REFCOUNT
8069 + "jno 0f\n"
8070 + LOCK_PREFIX "addq %1,%0\n"
8071 + "int $4\n0:\n"
8072 + _ASM_EXTABLE(0b, 0b)
8073 +#endif
8074 +
8075 + : "=m" (v->counter)
8076 + : "er" (i), "m" (v->counter));
8077 +}
8078 +
8079 +/**
8080 + * atomic64_sub_unchecked - subtract the atomic64 variable
8081 + * @i: integer value to subtract
8082 + * @v: pointer to type atomic64_unchecked_t
8083 + *
8084 + * Atomically subtracts @i from @v.
8085 + */
8086 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
8087 +{
8088 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
8089 : "=m" (v->counter)
8090 : "er" (i), "m" (v->counter));
8091 }
8092 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
8093 {
8094 unsigned char c;
8095
8096 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
8097 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
8098 +
8099 +#ifdef CONFIG_PAX_REFCOUNT
8100 + "jno 0f\n"
8101 + LOCK_PREFIX "addq %2,%0\n"
8102 + "int $4\n0:\n"
8103 + _ASM_EXTABLE(0b, 0b)
8104 +#endif
8105 +
8106 + "sete %1\n"
8107 : "=m" (v->counter), "=qm" (c)
8108 : "er" (i), "m" (v->counter) : "memory");
8109 return c;
8110 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
8111 */
8112 static inline void atomic64_inc(atomic64_t *v)
8113 {
8114 + asm volatile(LOCK_PREFIX "incq %0\n"
8115 +
8116 +#ifdef CONFIG_PAX_REFCOUNT
8117 + "jno 0f\n"
8118 + LOCK_PREFIX "decq %0\n"
8119 + "int $4\n0:\n"
8120 + _ASM_EXTABLE(0b, 0b)
8121 +#endif
8122 +
8123 + : "=m" (v->counter)
8124 + : "m" (v->counter));
8125 +}
8126 +
8127 +/**
8128 + * atomic64_inc_unchecked - increment atomic64 variable
8129 + * @v: pointer to type atomic64_unchecked_t
8130 + *
8131 + * Atomically increments @v by 1.
8132 + */
8133 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8134 +{
8135 asm volatile(LOCK_PREFIX "incq %0"
8136 : "=m" (v->counter)
8137 : "m" (v->counter));
8138 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
8139 */
8140 static inline void atomic64_dec(atomic64_t *v)
8141 {
8142 - asm volatile(LOCK_PREFIX "decq %0"
8143 + asm volatile(LOCK_PREFIX "decq %0\n"
8144 +
8145 +#ifdef CONFIG_PAX_REFCOUNT
8146 + "jno 0f\n"
8147 + LOCK_PREFIX "incq %0\n"
8148 + "int $4\n0:\n"
8149 + _ASM_EXTABLE(0b, 0b)
8150 +#endif
8151 +
8152 + : "=m" (v->counter)
8153 + : "m" (v->counter));
8154 +}
8155 +
8156 +/**
8157 + * atomic64_dec_unchecked - decrement atomic64 variable
8158 + * @v: pointer to type atomic64_t
8159 + *
8160 + * Atomically decrements @v by 1.
8161 + */
8162 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8163 +{
8164 + asm volatile(LOCK_PREFIX "decq %0\n"
8165 : "=m" (v->counter)
8166 : "m" (v->counter));
8167 }
8168 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
8169 {
8170 unsigned char c;
8171
8172 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
8173 + asm volatile(LOCK_PREFIX "decq %0\n"
8174 +
8175 +#ifdef CONFIG_PAX_REFCOUNT
8176 + "jno 0f\n"
8177 + LOCK_PREFIX "incq %0\n"
8178 + "int $4\n0:\n"
8179 + _ASM_EXTABLE(0b, 0b)
8180 +#endif
8181 +
8182 + "sete %1\n"
8183 : "=m" (v->counter), "=qm" (c)
8184 : "m" (v->counter) : "memory");
8185 return c != 0;
8186 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
8187 {
8188 unsigned char c;
8189
8190 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
8191 + asm volatile(LOCK_PREFIX "incq %0\n"
8192 +
8193 +#ifdef CONFIG_PAX_REFCOUNT
8194 + "jno 0f\n"
8195 + LOCK_PREFIX "decq %0\n"
8196 + "int $4\n0:\n"
8197 + _ASM_EXTABLE(0b, 0b)
8198 +#endif
8199 +
8200 + "sete %1\n"
8201 : "=m" (v->counter), "=qm" (c)
8202 : "m" (v->counter) : "memory");
8203 return c != 0;
8204 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
8205 {
8206 unsigned char c;
8207
8208 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
8209 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
8210 +
8211 +#ifdef CONFIG_PAX_REFCOUNT
8212 + "jno 0f\n"
8213 + LOCK_PREFIX "subq %2,%0\n"
8214 + "int $4\n0:\n"
8215 + _ASM_EXTABLE(0b, 0b)
8216 +#endif
8217 +
8218 + "sets %1\n"
8219 : "=m" (v->counter), "=qm" (c)
8220 : "er" (i), "m" (v->counter) : "memory");
8221 return c;
8222 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
8223 */
8224 static inline long atomic64_add_return(long i, atomic64_t *v)
8225 {
8226 + return i + xadd_check_overflow(&v->counter, i);
8227 +}
8228 +
8229 +/**
8230 + * atomic64_add_return_unchecked - add and return
8231 + * @i: integer value to add
8232 + * @v: pointer to type atomic64_unchecked_t
8233 + *
8234 + * Atomically adds @i to @v and returns @i + @v
8235 + */
8236 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8237 +{
8238 return i + xadd(&v->counter, i);
8239 }
8240
8241 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
8242 }
8243
8244 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
8245 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8246 +{
8247 + return atomic64_add_return_unchecked(1, v);
8248 +}
8249 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
8250
8251 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8252 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8253 return cmpxchg(&v->counter, old, new);
8254 }
8255
8256 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8257 +{
8258 + return cmpxchg(&v->counter, old, new);
8259 +}
8260 +
8261 static inline long atomic64_xchg(atomic64_t *v, long new)
8262 {
8263 return xchg(&v->counter, new);
8264 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
8265 */
8266 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8267 {
8268 - long c, old;
8269 + long c, old, new;
8270 c = atomic64_read(v);
8271 for (;;) {
8272 - if (unlikely(c == (u)))
8273 + if (unlikely(c == u))
8274 break;
8275 - old = atomic64_cmpxchg((v), c, c + (a));
8276 +
8277 + asm volatile("add %2,%0\n"
8278 +
8279 +#ifdef CONFIG_PAX_REFCOUNT
8280 + "jno 0f\n"
8281 + "sub %2,%0\n"
8282 + "int $4\n0:\n"
8283 + _ASM_EXTABLE(0b, 0b)
8284 +#endif
8285 +
8286 + : "=r" (new)
8287 + : "0" (c), "ir" (a));
8288 +
8289 + old = atomic64_cmpxchg(v, c, new);
8290 if (likely(old == c))
8291 break;
8292 c = old;
8293 }
8294 - return c != (u);
8295 + return c != u;
8296 }
8297
8298 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8299 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
8300 index 1775d6e..b65017f 100644
8301 --- a/arch/x86/include/asm/bitops.h
8302 +++ b/arch/x86/include/asm/bitops.h
8303 @@ -38,7 +38,7 @@
8304 * a mask operation on a byte.
8305 */
8306 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8307 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8308 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8309 #define CONST_MASK(nr) (1 << ((nr) & 7))
8310
8311 /**
8312 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8313 index 5e1a2ee..c9f9533 100644
8314 --- a/arch/x86/include/asm/boot.h
8315 +++ b/arch/x86/include/asm/boot.h
8316 @@ -11,10 +11,15 @@
8317 #include <asm/pgtable_types.h>
8318
8319 /* Physical address where kernel should be loaded. */
8320 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8321 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8322 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8323 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8324
8325 +#ifndef __ASSEMBLY__
8326 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8327 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8328 +#endif
8329 +
8330 /* Minimum kernel alignment, as a power of two */
8331 #ifdef CONFIG_X86_64
8332 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8333 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8334 index 48f99f1..d78ebf9 100644
8335 --- a/arch/x86/include/asm/cache.h
8336 +++ b/arch/x86/include/asm/cache.h
8337 @@ -5,12 +5,13 @@
8338
8339 /* L1 cache line size */
8340 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8341 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8342 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8343
8344 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8345 +#define __read_only __attribute__((__section__(".data..read_only")))
8346
8347 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8348 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8349 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8350
8351 #ifdef CONFIG_X86_VSMP
8352 #ifdef CONFIG_SMP
8353 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8354 index 4e12668..501d239 100644
8355 --- a/arch/x86/include/asm/cacheflush.h
8356 +++ b/arch/x86/include/asm/cacheflush.h
8357 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8358 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8359
8360 if (pg_flags == _PGMT_DEFAULT)
8361 - return -1;
8362 + return ~0UL;
8363 else if (pg_flags == _PGMT_WC)
8364 return _PAGE_CACHE_WC;
8365 else if (pg_flags == _PGMT_UC_MINUS)
8366 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8367 index 46fc474..b02b0f9 100644
8368 --- a/arch/x86/include/asm/checksum_32.h
8369 +++ b/arch/x86/include/asm/checksum_32.h
8370 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8371 int len, __wsum sum,
8372 int *src_err_ptr, int *dst_err_ptr);
8373
8374 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8375 + int len, __wsum sum,
8376 + int *src_err_ptr, int *dst_err_ptr);
8377 +
8378 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8379 + int len, __wsum sum,
8380 + int *src_err_ptr, int *dst_err_ptr);
8381 +
8382 /*
8383 * Note: when you get a NULL pointer exception here this means someone
8384 * passed in an incorrect kernel address to one of these functions.
8385 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8386 int *err_ptr)
8387 {
8388 might_sleep();
8389 - return csum_partial_copy_generic((__force void *)src, dst,
8390 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8391 len, sum, err_ptr, NULL);
8392 }
8393
8394 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8395 {
8396 might_sleep();
8397 if (access_ok(VERIFY_WRITE, dst, len))
8398 - return csum_partial_copy_generic(src, (__force void *)dst,
8399 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8400 len, sum, NULL, err_ptr);
8401
8402 if (len)
8403 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
8404 index 5d3acdf..6447a02 100644
8405 --- a/arch/x86/include/asm/cmpxchg.h
8406 +++ b/arch/x86/include/asm/cmpxchg.h
8407 @@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
8408 __compiletime_error("Bad argument size for cmpxchg");
8409 extern void __xadd_wrong_size(void)
8410 __compiletime_error("Bad argument size for xadd");
8411 +extern void __xadd_check_overflow_wrong_size(void)
8412 + __compiletime_error("Bad argument size for xadd_check_overflow");
8413
8414 /*
8415 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
8416 @@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
8417 __ret; \
8418 })
8419
8420 +#define __xadd_check_overflow(ptr, inc, lock) \
8421 + ({ \
8422 + __typeof__ (*(ptr)) __ret = (inc); \
8423 + switch (sizeof(*(ptr))) { \
8424 + case __X86_CASE_L: \
8425 + asm volatile (lock "xaddl %0, %1\n" \
8426 + "jno 0f\n" \
8427 + "mov %0,%1\n" \
8428 + "int $4\n0:\n" \
8429 + _ASM_EXTABLE(0b, 0b) \
8430 + : "+r" (__ret), "+m" (*(ptr)) \
8431 + : : "memory", "cc"); \
8432 + break; \
8433 + case __X86_CASE_Q: \
8434 + asm volatile (lock "xaddq %q0, %1\n" \
8435 + "jno 0f\n" \
8436 + "mov %0,%1\n" \
8437 + "int $4\n0:\n" \
8438 + _ASM_EXTABLE(0b, 0b) \
8439 + : "+r" (__ret), "+m" (*(ptr)) \
8440 + : : "memory", "cc"); \
8441 + break; \
8442 + default: \
8443 + __xadd_check_overflow_wrong_size(); \
8444 + } \
8445 + __ret; \
8446 + })
8447 +
8448 /*
8449 * xadd() adds "inc" to "*ptr" and atomically returns the previous
8450 * value of "*ptr".
8451 @@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
8452 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
8453 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
8454
8455 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
8456 +
8457 #endif /* ASM_X86_CMPXCHG_H */
8458 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8459 index f3444f7..051a196 100644
8460 --- a/arch/x86/include/asm/cpufeature.h
8461 +++ b/arch/x86/include/asm/cpufeature.h
8462 @@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8463 ".section .discard,\"aw\",@progbits\n"
8464 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8465 ".previous\n"
8466 - ".section .altinstr_replacement,\"ax\"\n"
8467 + ".section .altinstr_replacement,\"a\"\n"
8468 "3: movb $1,%0\n"
8469 "4:\n"
8470 ".previous\n"
8471 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8472 index 41935fa..3b40db8 100644
8473 --- a/arch/x86/include/asm/desc.h
8474 +++ b/arch/x86/include/asm/desc.h
8475 @@ -4,6 +4,7 @@
8476 #include <asm/desc_defs.h>
8477 #include <asm/ldt.h>
8478 #include <asm/mmu.h>
8479 +#include <asm/pgtable.h>
8480
8481 #include <linux/smp.h>
8482
8483 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8484
8485 desc->type = (info->read_exec_only ^ 1) << 1;
8486 desc->type |= info->contents << 2;
8487 + desc->type |= info->seg_not_present ^ 1;
8488
8489 desc->s = 1;
8490 desc->dpl = 0x3;
8491 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8492 }
8493
8494 extern struct desc_ptr idt_descr;
8495 -extern gate_desc idt_table[];
8496 -
8497 -struct gdt_page {
8498 - struct desc_struct gdt[GDT_ENTRIES];
8499 -} __attribute__((aligned(PAGE_SIZE)));
8500 -
8501 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8502 +extern gate_desc idt_table[256];
8503
8504 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8505 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8506 {
8507 - return per_cpu(gdt_page, cpu).gdt;
8508 + return cpu_gdt_table[cpu];
8509 }
8510
8511 #ifdef CONFIG_X86_64
8512 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8513 unsigned long base, unsigned dpl, unsigned flags,
8514 unsigned short seg)
8515 {
8516 - gate->a = (seg << 16) | (base & 0xffff);
8517 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8518 + gate->gate.offset_low = base;
8519 + gate->gate.seg = seg;
8520 + gate->gate.reserved = 0;
8521 + gate->gate.type = type;
8522 + gate->gate.s = 0;
8523 + gate->gate.dpl = dpl;
8524 + gate->gate.p = 1;
8525 + gate->gate.offset_high = base >> 16;
8526 }
8527
8528 #endif
8529 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8530
8531 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8532 {
8533 + pax_open_kernel();
8534 memcpy(&idt[entry], gate, sizeof(*gate));
8535 + pax_close_kernel();
8536 }
8537
8538 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8539 {
8540 + pax_open_kernel();
8541 memcpy(&ldt[entry], desc, 8);
8542 + pax_close_kernel();
8543 }
8544
8545 static inline void
8546 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8547 default: size = sizeof(*gdt); break;
8548 }
8549
8550 + pax_open_kernel();
8551 memcpy(&gdt[entry], desc, size);
8552 + pax_close_kernel();
8553 }
8554
8555 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8556 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8557
8558 static inline void native_load_tr_desc(void)
8559 {
8560 + pax_open_kernel();
8561 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8562 + pax_close_kernel();
8563 }
8564
8565 static inline void native_load_gdt(const struct desc_ptr *dtr)
8566 @@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8567 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8568 unsigned int i;
8569
8570 + pax_open_kernel();
8571 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8572 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8573 + pax_close_kernel();
8574 }
8575
8576 #define _LDT_empty(info) \
8577 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8578 desc->limit = (limit >> 16) & 0xf;
8579 }
8580
8581 -static inline void _set_gate(int gate, unsigned type, void *addr,
8582 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8583 unsigned dpl, unsigned ist, unsigned seg)
8584 {
8585 gate_desc s;
8586 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8587 * Pentium F0 0F bugfix can have resulted in the mapped
8588 * IDT being write-protected.
8589 */
8590 -static inline void set_intr_gate(unsigned int n, void *addr)
8591 +static inline void set_intr_gate(unsigned int n, const void *addr)
8592 {
8593 BUG_ON((unsigned)n > 0xFF);
8594 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8595 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8596 /*
8597 * This routine sets up an interrupt gate at directory privilege level 3.
8598 */
8599 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8600 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8601 {
8602 BUG_ON((unsigned)n > 0xFF);
8603 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8604 }
8605
8606 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8607 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8608 {
8609 BUG_ON((unsigned)n > 0xFF);
8610 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8611 }
8612
8613 -static inline void set_trap_gate(unsigned int n, void *addr)
8614 +static inline void set_trap_gate(unsigned int n, const void *addr)
8615 {
8616 BUG_ON((unsigned)n > 0xFF);
8617 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8618 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8619 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8620 {
8621 BUG_ON((unsigned)n > 0xFF);
8622 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8623 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8624 }
8625
8626 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8627 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8628 {
8629 BUG_ON((unsigned)n > 0xFF);
8630 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8631 }
8632
8633 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8634 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8635 {
8636 BUG_ON((unsigned)n > 0xFF);
8637 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8638 }
8639
8640 +#ifdef CONFIG_X86_32
8641 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8642 +{
8643 + struct desc_struct d;
8644 +
8645 + if (likely(limit))
8646 + limit = (limit - 1UL) >> PAGE_SHIFT;
8647 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8648 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8649 +}
8650 +#endif
8651 +
8652 #endif /* _ASM_X86_DESC_H */
8653 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8654 index 278441f..b95a174 100644
8655 --- a/arch/x86/include/asm/desc_defs.h
8656 +++ b/arch/x86/include/asm/desc_defs.h
8657 @@ -31,6 +31,12 @@ struct desc_struct {
8658 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8659 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8660 };
8661 + struct {
8662 + u16 offset_low;
8663 + u16 seg;
8664 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8665 + unsigned offset_high: 16;
8666 + } gate;
8667 };
8668 } __attribute__((packed));
8669
8670 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8671 index 908b969..a1f4eb4 100644
8672 --- a/arch/x86/include/asm/e820.h
8673 +++ b/arch/x86/include/asm/e820.h
8674 @@ -69,7 +69,7 @@ struct e820map {
8675 #define ISA_START_ADDRESS 0xa0000
8676 #define ISA_END_ADDRESS 0x100000
8677
8678 -#define BIOS_BEGIN 0x000a0000
8679 +#define BIOS_BEGIN 0x000c0000
8680 #define BIOS_END 0x00100000
8681
8682 #define BIOS_ROM_BASE 0xffe00000
8683 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8684 index 5f962df..7289f09 100644
8685 --- a/arch/x86/include/asm/elf.h
8686 +++ b/arch/x86/include/asm/elf.h
8687 @@ -238,7 +238,25 @@ extern int force_personality32;
8688 the loader. We need to make sure that it is out of the way of the program
8689 that it will "exec", and that there is sufficient room for the brk. */
8690
8691 +#ifdef CONFIG_PAX_SEGMEXEC
8692 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8693 +#else
8694 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8695 +#endif
8696 +
8697 +#ifdef CONFIG_PAX_ASLR
8698 +#ifdef CONFIG_X86_32
8699 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8700 +
8701 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8702 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8703 +#else
8704 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8705 +
8706 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8707 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8708 +#endif
8709 +#endif
8710
8711 /* This yields a mask that user programs can use to figure out what
8712 instruction set this CPU supports. This could be done in user space,
8713 @@ -291,9 +309,7 @@ do { \
8714
8715 #define ARCH_DLINFO \
8716 do { \
8717 - if (vdso_enabled) \
8718 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8719 - (unsigned long)current->mm->context.vdso); \
8720 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8721 } while (0)
8722
8723 #define AT_SYSINFO 32
8724 @@ -304,7 +320,7 @@ do { \
8725
8726 #endif /* !CONFIG_X86_32 */
8727
8728 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8729 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8730
8731 #define VDSO_ENTRY \
8732 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8733 @@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8734 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8735 #define compat_arch_setup_additional_pages syscall32_setup_pages
8736
8737 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8738 -#define arch_randomize_brk arch_randomize_brk
8739 -
8740 /*
8741 * True on X86_32 or when emulating IA32 on X86_64
8742 */
8743 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8744 index cc70c1c..d96d011 100644
8745 --- a/arch/x86/include/asm/emergency-restart.h
8746 +++ b/arch/x86/include/asm/emergency-restart.h
8747 @@ -15,6 +15,6 @@ enum reboot_type {
8748
8749 extern enum reboot_type reboot_type;
8750
8751 -extern void machine_emergency_restart(void);
8752 +extern void machine_emergency_restart(void) __noreturn;
8753
8754 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8755 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8756 index d09bb03..4ea4194 100644
8757 --- a/arch/x86/include/asm/futex.h
8758 +++ b/arch/x86/include/asm/futex.h
8759 @@ -12,16 +12,18 @@
8760 #include <asm/system.h>
8761
8762 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8763 + typecheck(u32 __user *, uaddr); \
8764 asm volatile("1:\t" insn "\n" \
8765 "2:\t.section .fixup,\"ax\"\n" \
8766 "3:\tmov\t%3, %1\n" \
8767 "\tjmp\t2b\n" \
8768 "\t.previous\n" \
8769 _ASM_EXTABLE(1b, 3b) \
8770 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8771 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8772 : "i" (-EFAULT), "0" (oparg), "1" (0))
8773
8774 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8775 + typecheck(u32 __user *, uaddr); \
8776 asm volatile("1:\tmovl %2, %0\n" \
8777 "\tmovl\t%0, %3\n" \
8778 "\t" insn "\n" \
8779 @@ -34,7 +36,7 @@
8780 _ASM_EXTABLE(1b, 4b) \
8781 _ASM_EXTABLE(2b, 4b) \
8782 : "=&a" (oldval), "=&r" (ret), \
8783 - "+m" (*uaddr), "=&r" (tem) \
8784 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8785 : "r" (oparg), "i" (-EFAULT), "1" (0))
8786
8787 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8788 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8789
8790 switch (op) {
8791 case FUTEX_OP_SET:
8792 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8793 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8794 break;
8795 case FUTEX_OP_ADD:
8796 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8797 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8798 uaddr, oparg);
8799 break;
8800 case FUTEX_OP_OR:
8801 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8802 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8803 return -EFAULT;
8804
8805 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8806 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8807 "2:\t.section .fixup, \"ax\"\n"
8808 "3:\tmov %3, %0\n"
8809 "\tjmp 2b\n"
8810 "\t.previous\n"
8811 _ASM_EXTABLE(1b, 3b)
8812 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8813 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8814 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8815 : "memory"
8816 );
8817 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8818 index eb92a6e..b98b2f4 100644
8819 --- a/arch/x86/include/asm/hw_irq.h
8820 +++ b/arch/x86/include/asm/hw_irq.h
8821 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8822 extern void enable_IO_APIC(void);
8823
8824 /* Statistics */
8825 -extern atomic_t irq_err_count;
8826 -extern atomic_t irq_mis_count;
8827 +extern atomic_unchecked_t irq_err_count;
8828 +extern atomic_unchecked_t irq_mis_count;
8829
8830 /* EISA */
8831 extern void eisa_set_level_irq(unsigned int irq);
8832 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8833 index a850b4d..bae26dc 100644
8834 --- a/arch/x86/include/asm/i387.h
8835 +++ b/arch/x86/include/asm/i387.h
8836 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8837 {
8838 int err;
8839
8840 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8841 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8842 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8843 +#endif
8844 +
8845 /* See comment in fxsave() below. */
8846 #ifdef CONFIG_AS_FXSAVEQ
8847 asm volatile("1: fxrstorq %[fx]\n\t"
8848 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8849 {
8850 int err;
8851
8852 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8853 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8854 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8855 +#endif
8856 +
8857 /*
8858 * Clear the bytes not touched by the fxsave and reserved
8859 * for the SW usage.
8860 @@ -424,7 +434,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
8861 static inline bool interrupted_user_mode(void)
8862 {
8863 struct pt_regs *regs = get_irq_regs();
8864 - return regs && user_mode_vm(regs);
8865 + return regs && user_mode(regs);
8866 }
8867
8868 /*
8869 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8870 index d8e8eef..99f81ae 100644
8871 --- a/arch/x86/include/asm/io.h
8872 +++ b/arch/x86/include/asm/io.h
8873 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8874
8875 #include <linux/vmalloc.h>
8876
8877 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8878 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8879 +{
8880 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8881 +}
8882 +
8883 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8884 +{
8885 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8886 +}
8887 +
8888 /*
8889 * Convert a virtual cached pointer to an uncached pointer
8890 */
8891 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8892 index bba3cf8..06bc8da 100644
8893 --- a/arch/x86/include/asm/irqflags.h
8894 +++ b/arch/x86/include/asm/irqflags.h
8895 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8896 sti; \
8897 sysexit
8898
8899 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8900 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8901 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8902 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8903 +
8904 #else
8905 #define INTERRUPT_RETURN iret
8906 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8907 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8908 index 5478825..839e88c 100644
8909 --- a/arch/x86/include/asm/kprobes.h
8910 +++ b/arch/x86/include/asm/kprobes.h
8911 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8912 #define RELATIVEJUMP_SIZE 5
8913 #define RELATIVECALL_OPCODE 0xe8
8914 #define RELATIVE_ADDR_SIZE 4
8915 -#define MAX_STACK_SIZE 64
8916 -#define MIN_STACK_SIZE(ADDR) \
8917 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8918 - THREAD_SIZE - (unsigned long)(ADDR))) \
8919 - ? (MAX_STACK_SIZE) \
8920 - : (((unsigned long)current_thread_info()) + \
8921 - THREAD_SIZE - (unsigned long)(ADDR)))
8922 +#define MAX_STACK_SIZE 64UL
8923 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8924
8925 #define flush_insn_slot(p) do { } while (0)
8926
8927 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8928 index b4973f4..7c4d3fc 100644
8929 --- a/arch/x86/include/asm/kvm_host.h
8930 +++ b/arch/x86/include/asm/kvm_host.h
8931 @@ -459,7 +459,7 @@ struct kvm_arch {
8932 unsigned int n_requested_mmu_pages;
8933 unsigned int n_max_mmu_pages;
8934 unsigned int indirect_shadow_pages;
8935 - atomic_t invlpg_counter;
8936 + atomic_unchecked_t invlpg_counter;
8937 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8938 /*
8939 * Hash table of struct kvm_mmu_page.
8940 @@ -638,7 +638,7 @@ struct kvm_x86_ops {
8941 int (*check_intercept)(struct kvm_vcpu *vcpu,
8942 struct x86_instruction_info *info,
8943 enum x86_intercept_stage stage);
8944 -};
8945 +} __do_const;
8946
8947 struct kvm_arch_async_pf {
8948 u32 token;
8949 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8950 index 9cdae5d..300d20f 100644
8951 --- a/arch/x86/include/asm/local.h
8952 +++ b/arch/x86/include/asm/local.h
8953 @@ -18,26 +18,58 @@ typedef struct {
8954
8955 static inline void local_inc(local_t *l)
8956 {
8957 - asm volatile(_ASM_INC "%0"
8958 + asm volatile(_ASM_INC "%0\n"
8959 +
8960 +#ifdef CONFIG_PAX_REFCOUNT
8961 + "jno 0f\n"
8962 + _ASM_DEC "%0\n"
8963 + "int $4\n0:\n"
8964 + _ASM_EXTABLE(0b, 0b)
8965 +#endif
8966 +
8967 : "+m" (l->a.counter));
8968 }
8969
8970 static inline void local_dec(local_t *l)
8971 {
8972 - asm volatile(_ASM_DEC "%0"
8973 + asm volatile(_ASM_DEC "%0\n"
8974 +
8975 +#ifdef CONFIG_PAX_REFCOUNT
8976 + "jno 0f\n"
8977 + _ASM_INC "%0\n"
8978 + "int $4\n0:\n"
8979 + _ASM_EXTABLE(0b, 0b)
8980 +#endif
8981 +
8982 : "+m" (l->a.counter));
8983 }
8984
8985 static inline void local_add(long i, local_t *l)
8986 {
8987 - asm volatile(_ASM_ADD "%1,%0"
8988 + asm volatile(_ASM_ADD "%1,%0\n"
8989 +
8990 +#ifdef CONFIG_PAX_REFCOUNT
8991 + "jno 0f\n"
8992 + _ASM_SUB "%1,%0\n"
8993 + "int $4\n0:\n"
8994 + _ASM_EXTABLE(0b, 0b)
8995 +#endif
8996 +
8997 : "+m" (l->a.counter)
8998 : "ir" (i));
8999 }
9000
9001 static inline void local_sub(long i, local_t *l)
9002 {
9003 - asm volatile(_ASM_SUB "%1,%0"
9004 + asm volatile(_ASM_SUB "%1,%0\n"
9005 +
9006 +#ifdef CONFIG_PAX_REFCOUNT
9007 + "jno 0f\n"
9008 + _ASM_ADD "%1,%0\n"
9009 + "int $4\n0:\n"
9010 + _ASM_EXTABLE(0b, 0b)
9011 +#endif
9012 +
9013 : "+m" (l->a.counter)
9014 : "ir" (i));
9015 }
9016 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
9017 {
9018 unsigned char c;
9019
9020 - asm volatile(_ASM_SUB "%2,%0; sete %1"
9021 + asm volatile(_ASM_SUB "%2,%0\n"
9022 +
9023 +#ifdef CONFIG_PAX_REFCOUNT
9024 + "jno 0f\n"
9025 + _ASM_ADD "%2,%0\n"
9026 + "int $4\n0:\n"
9027 + _ASM_EXTABLE(0b, 0b)
9028 +#endif
9029 +
9030 + "sete %1\n"
9031 : "+m" (l->a.counter), "=qm" (c)
9032 : "ir" (i) : "memory");
9033 return c;
9034 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
9035 {
9036 unsigned char c;
9037
9038 - asm volatile(_ASM_DEC "%0; sete %1"
9039 + asm volatile(_ASM_DEC "%0\n"
9040 +
9041 +#ifdef CONFIG_PAX_REFCOUNT
9042 + "jno 0f\n"
9043 + _ASM_INC "%0\n"
9044 + "int $4\n0:\n"
9045 + _ASM_EXTABLE(0b, 0b)
9046 +#endif
9047 +
9048 + "sete %1\n"
9049 : "+m" (l->a.counter), "=qm" (c)
9050 : : "memory");
9051 return c != 0;
9052 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
9053 {
9054 unsigned char c;
9055
9056 - asm volatile(_ASM_INC "%0; sete %1"
9057 + asm volatile(_ASM_INC "%0\n"
9058 +
9059 +#ifdef CONFIG_PAX_REFCOUNT
9060 + "jno 0f\n"
9061 + _ASM_DEC "%0\n"
9062 + "int $4\n0:\n"
9063 + _ASM_EXTABLE(0b, 0b)
9064 +#endif
9065 +
9066 + "sete %1\n"
9067 : "+m" (l->a.counter), "=qm" (c)
9068 : : "memory");
9069 return c != 0;
9070 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
9071 {
9072 unsigned char c;
9073
9074 - asm volatile(_ASM_ADD "%2,%0; sets %1"
9075 + asm volatile(_ASM_ADD "%2,%0\n"
9076 +
9077 +#ifdef CONFIG_PAX_REFCOUNT
9078 + "jno 0f\n"
9079 + _ASM_SUB "%2,%0\n"
9080 + "int $4\n0:\n"
9081 + _ASM_EXTABLE(0b, 0b)
9082 +#endif
9083 +
9084 + "sets %1\n"
9085 : "+m" (l->a.counter), "=qm" (c)
9086 : "ir" (i) : "memory");
9087 return c;
9088 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
9089 #endif
9090 /* Modern 486+ processor */
9091 __i = i;
9092 - asm volatile(_ASM_XADD "%0, %1;"
9093 + asm volatile(_ASM_XADD "%0, %1\n"
9094 +
9095 +#ifdef CONFIG_PAX_REFCOUNT
9096 + "jno 0f\n"
9097 + _ASM_MOV "%0,%1\n"
9098 + "int $4\n0:\n"
9099 + _ASM_EXTABLE(0b, 0b)
9100 +#endif
9101 +
9102 : "+r" (i), "+m" (l->a.counter)
9103 : : "memory");
9104 return i + __i;
9105 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
9106 index 593e51d..fa69c9a 100644
9107 --- a/arch/x86/include/asm/mman.h
9108 +++ b/arch/x86/include/asm/mman.h
9109 @@ -5,4 +5,14 @@
9110
9111 #include <asm-generic/mman.h>
9112
9113 +#ifdef __KERNEL__
9114 +#ifndef __ASSEMBLY__
9115 +#ifdef CONFIG_X86_32
9116 +#define arch_mmap_check i386_mmap_check
9117 +int i386_mmap_check(unsigned long addr, unsigned long len,
9118 + unsigned long flags);
9119 +#endif
9120 +#endif
9121 +#endif
9122 +
9123 #endif /* _ASM_X86_MMAN_H */
9124 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
9125 index 5f55e69..e20bfb1 100644
9126 --- a/arch/x86/include/asm/mmu.h
9127 +++ b/arch/x86/include/asm/mmu.h
9128 @@ -9,7 +9,7 @@
9129 * we put the segment information here.
9130 */
9131 typedef struct {
9132 - void *ldt;
9133 + struct desc_struct *ldt;
9134 int size;
9135
9136 #ifdef CONFIG_X86_64
9137 @@ -18,7 +18,19 @@ typedef struct {
9138 #endif
9139
9140 struct mutex lock;
9141 - void *vdso;
9142 + unsigned long vdso;
9143 +
9144 +#ifdef CONFIG_X86_32
9145 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9146 + unsigned long user_cs_base;
9147 + unsigned long user_cs_limit;
9148 +
9149 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9150 + cpumask_t cpu_user_cs_mask;
9151 +#endif
9152 +
9153 +#endif
9154 +#endif
9155 } mm_context_t;
9156
9157 #ifdef CONFIG_SMP
9158 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
9159 index 6902152..399f3a2 100644
9160 --- a/arch/x86/include/asm/mmu_context.h
9161 +++ b/arch/x86/include/asm/mmu_context.h
9162 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
9163
9164 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9165 {
9166 +
9167 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9168 + unsigned int i;
9169 + pgd_t *pgd;
9170 +
9171 + pax_open_kernel();
9172 + pgd = get_cpu_pgd(smp_processor_id());
9173 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9174 + set_pgd_batched(pgd+i, native_make_pgd(0));
9175 + pax_close_kernel();
9176 +#endif
9177 +
9178 #ifdef CONFIG_SMP
9179 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9180 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9181 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9182 struct task_struct *tsk)
9183 {
9184 unsigned cpu = smp_processor_id();
9185 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9186 + int tlbstate = TLBSTATE_OK;
9187 +#endif
9188
9189 if (likely(prev != next)) {
9190 #ifdef CONFIG_SMP
9191 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9192 + tlbstate = percpu_read(cpu_tlbstate.state);
9193 +#endif
9194 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9195 percpu_write(cpu_tlbstate.active_mm, next);
9196 #endif
9197 cpumask_set_cpu(cpu, mm_cpumask(next));
9198
9199 /* Re-load page tables */
9200 +#ifdef CONFIG_PAX_PER_CPU_PGD
9201 + pax_open_kernel();
9202 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9203 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9204 + pax_close_kernel();
9205 + load_cr3(get_cpu_pgd(cpu));
9206 +#else
9207 load_cr3(next->pgd);
9208 +#endif
9209
9210 /* stop flush ipis for the previous mm */
9211 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9212 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9213 */
9214 if (unlikely(prev->context.ldt != next->context.ldt))
9215 load_LDT_nolock(&next->context);
9216 - }
9217 +
9218 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9219 + if (!(__supported_pte_mask & _PAGE_NX)) {
9220 + smp_mb__before_clear_bit();
9221 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9222 + smp_mb__after_clear_bit();
9223 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9224 + }
9225 +#endif
9226 +
9227 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9228 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9229 + prev->context.user_cs_limit != next->context.user_cs_limit))
9230 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9231 #ifdef CONFIG_SMP
9232 + else if (unlikely(tlbstate != TLBSTATE_OK))
9233 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9234 +#endif
9235 +#endif
9236 +
9237 + }
9238 else {
9239 +
9240 +#ifdef CONFIG_PAX_PER_CPU_PGD
9241 + pax_open_kernel();
9242 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9243 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9244 + pax_close_kernel();
9245 + load_cr3(get_cpu_pgd(cpu));
9246 +#endif
9247 +
9248 +#ifdef CONFIG_SMP
9249 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9250 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9251
9252 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9253 * tlb flush IPI delivery. We must reload CR3
9254 * to make sure to use no freed page tables.
9255 */
9256 +
9257 +#ifndef CONFIG_PAX_PER_CPU_PGD
9258 load_cr3(next->pgd);
9259 +#endif
9260 +
9261 load_LDT_nolock(&next->context);
9262 +
9263 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9264 + if (!(__supported_pte_mask & _PAGE_NX))
9265 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9266 +#endif
9267 +
9268 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9269 +#ifdef CONFIG_PAX_PAGEEXEC
9270 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
9271 +#endif
9272 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9273 +#endif
9274 +
9275 }
9276 +#endif
9277 }
9278 -#endif
9279 }
9280
9281 #define activate_mm(prev, next) \
9282 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
9283 index 9eae775..c914fea 100644
9284 --- a/arch/x86/include/asm/module.h
9285 +++ b/arch/x86/include/asm/module.h
9286 @@ -5,6 +5,7 @@
9287
9288 #ifdef CONFIG_X86_64
9289 /* X86_64 does not define MODULE_PROC_FAMILY */
9290 +#define MODULE_PROC_FAMILY ""
9291 #elif defined CONFIG_M386
9292 #define MODULE_PROC_FAMILY "386 "
9293 #elif defined CONFIG_M486
9294 @@ -59,8 +60,20 @@
9295 #error unknown processor family
9296 #endif
9297
9298 -#ifdef CONFIG_X86_32
9299 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
9300 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9301 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
9302 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
9303 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
9304 +#else
9305 +#define MODULE_PAX_KERNEXEC ""
9306 #endif
9307
9308 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9309 +#define MODULE_PAX_UDEREF "UDEREF "
9310 +#else
9311 +#define MODULE_PAX_UDEREF ""
9312 +#endif
9313 +
9314 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
9315 +
9316 #endif /* _ASM_X86_MODULE_H */
9317 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
9318 index 7639dbf..e08a58c 100644
9319 --- a/arch/x86/include/asm/page_64_types.h
9320 +++ b/arch/x86/include/asm/page_64_types.h
9321 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9322
9323 /* duplicated to the one in bootmem.h */
9324 extern unsigned long max_pfn;
9325 -extern unsigned long phys_base;
9326 +extern const unsigned long phys_base;
9327
9328 extern unsigned long __phys_addr(unsigned long);
9329 #define __phys_reloc_hide(x) (x)
9330 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
9331 index a7d2db9..edb023e 100644
9332 --- a/arch/x86/include/asm/paravirt.h
9333 +++ b/arch/x86/include/asm/paravirt.h
9334 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
9335 val);
9336 }
9337
9338 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9339 +{
9340 + pgdval_t val = native_pgd_val(pgd);
9341 +
9342 + if (sizeof(pgdval_t) > sizeof(long))
9343 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9344 + val, (u64)val >> 32);
9345 + else
9346 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9347 + val);
9348 +}
9349 +
9350 static inline void pgd_clear(pgd_t *pgdp)
9351 {
9352 set_pgd(pgdp, __pgd(0));
9353 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9354 pv_mmu_ops.set_fixmap(idx, phys, flags);
9355 }
9356
9357 +#ifdef CONFIG_PAX_KERNEXEC
9358 +static inline unsigned long pax_open_kernel(void)
9359 +{
9360 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9361 +}
9362 +
9363 +static inline unsigned long pax_close_kernel(void)
9364 +{
9365 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9366 +}
9367 +#else
9368 +static inline unsigned long pax_open_kernel(void) { return 0; }
9369 +static inline unsigned long pax_close_kernel(void) { return 0; }
9370 +#endif
9371 +
9372 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9373
9374 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9375 @@ -964,7 +991,7 @@ extern void default_banner(void);
9376
9377 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9378 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9379 -#define PARA_INDIRECT(addr) *%cs:addr
9380 +#define PARA_INDIRECT(addr) *%ss:addr
9381 #endif
9382
9383 #define INTERRUPT_RETURN \
9384 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
9385 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9386 CLBR_NONE, \
9387 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9388 +
9389 +#define GET_CR0_INTO_RDI \
9390 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9391 + mov %rax,%rdi
9392 +
9393 +#define SET_RDI_INTO_CR0 \
9394 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9395 +
9396 +#define GET_CR3_INTO_RDI \
9397 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9398 + mov %rax,%rdi
9399 +
9400 +#define SET_RDI_INTO_CR3 \
9401 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9402 +
9403 #endif /* CONFIG_X86_32 */
9404
9405 #endif /* __ASSEMBLY__ */
9406 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9407 index 8e8b9a4..f07d725 100644
9408 --- a/arch/x86/include/asm/paravirt_types.h
9409 +++ b/arch/x86/include/asm/paravirt_types.h
9410 @@ -84,20 +84,20 @@ struct pv_init_ops {
9411 */
9412 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9413 unsigned long addr, unsigned len);
9414 -};
9415 +} __no_const;
9416
9417
9418 struct pv_lazy_ops {
9419 /* Set deferred update mode, used for batching operations. */
9420 void (*enter)(void);
9421 void (*leave)(void);
9422 -};
9423 +} __no_const;
9424
9425 struct pv_time_ops {
9426 unsigned long long (*sched_clock)(void);
9427 unsigned long long (*steal_clock)(int cpu);
9428 unsigned long (*get_tsc_khz)(void);
9429 -};
9430 +} __no_const;
9431
9432 struct pv_cpu_ops {
9433 /* hooks for various privileged instructions */
9434 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
9435
9436 void (*start_context_switch)(struct task_struct *prev);
9437 void (*end_context_switch)(struct task_struct *next);
9438 -};
9439 +} __no_const;
9440
9441 struct pv_irq_ops {
9442 /*
9443 @@ -224,7 +224,7 @@ struct pv_apic_ops {
9444 unsigned long start_eip,
9445 unsigned long start_esp);
9446 #endif
9447 -};
9448 +} __no_const;
9449
9450 struct pv_mmu_ops {
9451 unsigned long (*read_cr2)(void);
9452 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
9453 struct paravirt_callee_save make_pud;
9454
9455 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9456 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9457 #endif /* PAGETABLE_LEVELS == 4 */
9458 #endif /* PAGETABLE_LEVELS >= 3 */
9459
9460 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
9461 an mfn. We can tell which is which from the index. */
9462 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9463 phys_addr_t phys, pgprot_t flags);
9464 +
9465 +#ifdef CONFIG_PAX_KERNEXEC
9466 + unsigned long (*pax_open_kernel)(void);
9467 + unsigned long (*pax_close_kernel)(void);
9468 +#endif
9469 +
9470 };
9471
9472 struct arch_spinlock;
9473 @@ -334,7 +341,7 @@ struct pv_lock_ops {
9474 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9475 int (*spin_trylock)(struct arch_spinlock *lock);
9476 void (*spin_unlock)(struct arch_spinlock *lock);
9477 -};
9478 +} __no_const;
9479
9480 /* This contains all the paravirt structures: we get a convenient
9481 * number for each function using the offset which we use to indicate
9482 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9483 index b4389a4..b7ff22c 100644
9484 --- a/arch/x86/include/asm/pgalloc.h
9485 +++ b/arch/x86/include/asm/pgalloc.h
9486 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9487 pmd_t *pmd, pte_t *pte)
9488 {
9489 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9490 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9491 +}
9492 +
9493 +static inline void pmd_populate_user(struct mm_struct *mm,
9494 + pmd_t *pmd, pte_t *pte)
9495 +{
9496 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9497 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9498 }
9499
9500 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9501 index 98391db..8f6984e 100644
9502 --- a/arch/x86/include/asm/pgtable-2level.h
9503 +++ b/arch/x86/include/asm/pgtable-2level.h
9504 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9505
9506 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9507 {
9508 + pax_open_kernel();
9509 *pmdp = pmd;
9510 + pax_close_kernel();
9511 }
9512
9513 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9514 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9515 index effff47..f9e4035 100644
9516 --- a/arch/x86/include/asm/pgtable-3level.h
9517 +++ b/arch/x86/include/asm/pgtable-3level.h
9518 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9519
9520 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9521 {
9522 + pax_open_kernel();
9523 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9524 + pax_close_kernel();
9525 }
9526
9527 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9528 {
9529 + pax_open_kernel();
9530 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9531 + pax_close_kernel();
9532 }
9533
9534 /*
9535 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9536 index 18601c8..3d716d1 100644
9537 --- a/arch/x86/include/asm/pgtable.h
9538 +++ b/arch/x86/include/asm/pgtable.h
9539 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9540
9541 #ifndef __PAGETABLE_PUD_FOLDED
9542 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9543 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9544 #define pgd_clear(pgd) native_pgd_clear(pgd)
9545 #endif
9546
9547 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9548
9549 #define arch_end_context_switch(prev) do {} while(0)
9550
9551 +#define pax_open_kernel() native_pax_open_kernel()
9552 +#define pax_close_kernel() native_pax_close_kernel()
9553 #endif /* CONFIG_PARAVIRT */
9554
9555 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9556 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9557 +
9558 +#ifdef CONFIG_PAX_KERNEXEC
9559 +static inline unsigned long native_pax_open_kernel(void)
9560 +{
9561 + unsigned long cr0;
9562 +
9563 + preempt_disable();
9564 + barrier();
9565 + cr0 = read_cr0() ^ X86_CR0_WP;
9566 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9567 + write_cr0(cr0);
9568 + return cr0 ^ X86_CR0_WP;
9569 +}
9570 +
9571 +static inline unsigned long native_pax_close_kernel(void)
9572 +{
9573 + unsigned long cr0;
9574 +
9575 + cr0 = read_cr0() ^ X86_CR0_WP;
9576 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9577 + write_cr0(cr0);
9578 + barrier();
9579 + preempt_enable_no_resched();
9580 + return cr0 ^ X86_CR0_WP;
9581 +}
9582 +#else
9583 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9584 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9585 +#endif
9586 +
9587 /*
9588 * The following only work if pte_present() is true.
9589 * Undefined behaviour if not..
9590 */
9591 +static inline int pte_user(pte_t pte)
9592 +{
9593 + return pte_val(pte) & _PAGE_USER;
9594 +}
9595 +
9596 static inline int pte_dirty(pte_t pte)
9597 {
9598 return pte_flags(pte) & _PAGE_DIRTY;
9599 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9600 return pte_clear_flags(pte, _PAGE_RW);
9601 }
9602
9603 +static inline pte_t pte_mkread(pte_t pte)
9604 +{
9605 + return __pte(pte_val(pte) | _PAGE_USER);
9606 +}
9607 +
9608 static inline pte_t pte_mkexec(pte_t pte)
9609 {
9610 - return pte_clear_flags(pte, _PAGE_NX);
9611 +#ifdef CONFIG_X86_PAE
9612 + if (__supported_pte_mask & _PAGE_NX)
9613 + return pte_clear_flags(pte, _PAGE_NX);
9614 + else
9615 +#endif
9616 + return pte_set_flags(pte, _PAGE_USER);
9617 +}
9618 +
9619 +static inline pte_t pte_exprotect(pte_t pte)
9620 +{
9621 +#ifdef CONFIG_X86_PAE
9622 + if (__supported_pte_mask & _PAGE_NX)
9623 + return pte_set_flags(pte, _PAGE_NX);
9624 + else
9625 +#endif
9626 + return pte_clear_flags(pte, _PAGE_USER);
9627 }
9628
9629 static inline pte_t pte_mkdirty(pte_t pte)
9630 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9631 #endif
9632
9633 #ifndef __ASSEMBLY__
9634 +
9635 +#ifdef CONFIG_PAX_PER_CPU_PGD
9636 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9637 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9638 +{
9639 + return cpu_pgd[cpu];
9640 +}
9641 +#endif
9642 +
9643 #include <linux/mm_types.h>
9644
9645 static inline int pte_none(pte_t pte)
9646 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9647
9648 static inline int pgd_bad(pgd_t pgd)
9649 {
9650 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9651 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9652 }
9653
9654 static inline int pgd_none(pgd_t pgd)
9655 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9656 * pgd_offset() returns a (pgd_t *)
9657 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9658 */
9659 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9660 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9661 +
9662 +#ifdef CONFIG_PAX_PER_CPU_PGD
9663 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9664 +#endif
9665 +
9666 /*
9667 * a shortcut which implies the use of the kernel's pgd, instead
9668 * of a process's
9669 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9670 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9671 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9672
9673 +#ifdef CONFIG_X86_32
9674 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9675 +#else
9676 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9677 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9678 +
9679 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9680 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9681 +#else
9682 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9683 +#endif
9684 +
9685 +#endif
9686 +
9687 #ifndef __ASSEMBLY__
9688
9689 extern int direct_gbpages;
9690 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9691 * dst and src can be on the same page, but the range must not overlap,
9692 * and must not cross a page boundary.
9693 */
9694 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9695 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9696 {
9697 - memcpy(dst, src, count * sizeof(pgd_t));
9698 + pax_open_kernel();
9699 + while (count--)
9700 + *dst++ = *src++;
9701 + pax_close_kernel();
9702 }
9703
9704 +#ifdef CONFIG_PAX_PER_CPU_PGD
9705 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9706 +#endif
9707 +
9708 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9709 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9710 +#else
9711 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9712 +#endif
9713
9714 #include <asm-generic/pgtable.h>
9715 #endif /* __ASSEMBLY__ */
9716 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9717 index 0c92113..34a77c6 100644
9718 --- a/arch/x86/include/asm/pgtable_32.h
9719 +++ b/arch/x86/include/asm/pgtable_32.h
9720 @@ -25,9 +25,6 @@
9721 struct mm_struct;
9722 struct vm_area_struct;
9723
9724 -extern pgd_t swapper_pg_dir[1024];
9725 -extern pgd_t initial_page_table[1024];
9726 -
9727 static inline void pgtable_cache_init(void) { }
9728 static inline void check_pgt_cache(void) { }
9729 void paging_init(void);
9730 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9731 # include <asm/pgtable-2level.h>
9732 #endif
9733
9734 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9735 +extern pgd_t initial_page_table[PTRS_PER_PGD];
9736 +#ifdef CONFIG_X86_PAE
9737 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9738 +#endif
9739 +
9740 #if defined(CONFIG_HIGHPTE)
9741 #define pte_offset_map(dir, address) \
9742 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9743 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9744 /* Clear a kernel PTE and flush it from the TLB */
9745 #define kpte_clear_flush(ptep, vaddr) \
9746 do { \
9747 + pax_open_kernel(); \
9748 pte_clear(&init_mm, (vaddr), (ptep)); \
9749 + pax_close_kernel(); \
9750 __flush_tlb_one((vaddr)); \
9751 } while (0)
9752
9753 @@ -74,6 +79,9 @@ do { \
9754
9755 #endif /* !__ASSEMBLY__ */
9756
9757 +#define HAVE_ARCH_UNMAPPED_AREA
9758 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9759 +
9760 /*
9761 * kern_addr_valid() is (1) for FLATMEM and (0) for
9762 * SPARSEMEM and DISCONTIGMEM
9763 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9764 index ed5903b..c7fe163 100644
9765 --- a/arch/x86/include/asm/pgtable_32_types.h
9766 +++ b/arch/x86/include/asm/pgtable_32_types.h
9767 @@ -8,7 +8,7 @@
9768 */
9769 #ifdef CONFIG_X86_PAE
9770 # include <asm/pgtable-3level_types.h>
9771 -# define PMD_SIZE (1UL << PMD_SHIFT)
9772 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9773 # define PMD_MASK (~(PMD_SIZE - 1))
9774 #else
9775 # include <asm/pgtable-2level_types.h>
9776 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9777 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9778 #endif
9779
9780 +#ifdef CONFIG_PAX_KERNEXEC
9781 +#ifndef __ASSEMBLY__
9782 +extern unsigned char MODULES_EXEC_VADDR[];
9783 +extern unsigned char MODULES_EXEC_END[];
9784 +#endif
9785 +#include <asm/boot.h>
9786 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9787 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9788 +#else
9789 +#define ktla_ktva(addr) (addr)
9790 +#define ktva_ktla(addr) (addr)
9791 +#endif
9792 +
9793 #define MODULES_VADDR VMALLOC_START
9794 #define MODULES_END VMALLOC_END
9795 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9796 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9797 index 975f709..107976d 100644
9798 --- a/arch/x86/include/asm/pgtable_64.h
9799 +++ b/arch/x86/include/asm/pgtable_64.h
9800 @@ -16,10 +16,14 @@
9801
9802 extern pud_t level3_kernel_pgt[512];
9803 extern pud_t level3_ident_pgt[512];
9804 +extern pud_t level3_vmalloc_start_pgt[512];
9805 +extern pud_t level3_vmalloc_end_pgt[512];
9806 +extern pud_t level3_vmemmap_pgt[512];
9807 +extern pud_t level2_vmemmap_pgt[512];
9808 extern pmd_t level2_kernel_pgt[512];
9809 extern pmd_t level2_fixmap_pgt[512];
9810 -extern pmd_t level2_ident_pgt[512];
9811 -extern pgd_t init_level4_pgt[];
9812 +extern pmd_t level2_ident_pgt[512*2];
9813 +extern pgd_t init_level4_pgt[512];
9814
9815 #define swapper_pg_dir init_level4_pgt
9816
9817 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9818
9819 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9820 {
9821 + pax_open_kernel();
9822 *pmdp = pmd;
9823 + pax_close_kernel();
9824 }
9825
9826 static inline void native_pmd_clear(pmd_t *pmd)
9827 @@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9828
9829 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9830 {
9831 + pax_open_kernel();
9832 + *pgdp = pgd;
9833 + pax_close_kernel();
9834 +}
9835 +
9836 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9837 +{
9838 *pgdp = pgd;
9839 }
9840
9841 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9842 index 766ea16..5b96cb3 100644
9843 --- a/arch/x86/include/asm/pgtable_64_types.h
9844 +++ b/arch/x86/include/asm/pgtable_64_types.h
9845 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9846 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9847 #define MODULES_END _AC(0xffffffffff000000, UL)
9848 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9849 +#define MODULES_EXEC_VADDR MODULES_VADDR
9850 +#define MODULES_EXEC_END MODULES_END
9851 +
9852 +#define ktla_ktva(addr) (addr)
9853 +#define ktva_ktla(addr) (addr)
9854
9855 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9856 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9857 index 013286a..8b42f4f 100644
9858 --- a/arch/x86/include/asm/pgtable_types.h
9859 +++ b/arch/x86/include/asm/pgtable_types.h
9860 @@ -16,13 +16,12 @@
9861 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9862 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9863 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9864 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9865 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9866 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9867 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9868 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9869 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9870 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9871 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9872 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9873 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9874 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9875
9876 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9877 @@ -40,7 +39,6 @@
9878 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9879 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9880 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9881 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9882 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9883 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9884 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9885 @@ -57,8 +55,10 @@
9886
9887 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9888 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9889 -#else
9890 +#elif defined(CONFIG_KMEMCHECK)
9891 #define _PAGE_NX (_AT(pteval_t, 0))
9892 +#else
9893 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9894 #endif
9895
9896 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9897 @@ -96,6 +96,9 @@
9898 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9899 _PAGE_ACCESSED)
9900
9901 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9902 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9903 +
9904 #define __PAGE_KERNEL_EXEC \
9905 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9906 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9907 @@ -106,7 +109,7 @@
9908 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9909 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9910 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9911 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9912 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9913 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9914 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9915 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9916 @@ -168,8 +171,8 @@
9917 * bits are combined, this will alow user to access the high address mapped
9918 * VDSO in the presence of CONFIG_COMPAT_VDSO
9919 */
9920 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9921 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9922 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9923 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9924 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9925 #endif
9926
9927 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9928 {
9929 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9930 }
9931 +#endif
9932
9933 +#if PAGETABLE_LEVELS == 3
9934 +#include <asm-generic/pgtable-nopud.h>
9935 +#endif
9936 +
9937 +#if PAGETABLE_LEVELS == 2
9938 +#include <asm-generic/pgtable-nopmd.h>
9939 +#endif
9940 +
9941 +#ifndef __ASSEMBLY__
9942 #if PAGETABLE_LEVELS > 3
9943 typedef struct { pudval_t pud; } pud_t;
9944
9945 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9946 return pud.pud;
9947 }
9948 #else
9949 -#include <asm-generic/pgtable-nopud.h>
9950 -
9951 static inline pudval_t native_pud_val(pud_t pud)
9952 {
9953 return native_pgd_val(pud.pgd);
9954 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9955 return pmd.pmd;
9956 }
9957 #else
9958 -#include <asm-generic/pgtable-nopmd.h>
9959 -
9960 static inline pmdval_t native_pmd_val(pmd_t pmd)
9961 {
9962 return native_pgd_val(pmd.pud.pgd);
9963 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9964
9965 extern pteval_t __supported_pte_mask;
9966 extern void set_nx(void);
9967 -extern int nx_enabled;
9968
9969 #define pgprot_writecombine pgprot_writecombine
9970 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9971 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9972 index bb3ee36..781a6b8 100644
9973 --- a/arch/x86/include/asm/processor.h
9974 +++ b/arch/x86/include/asm/processor.h
9975 @@ -268,7 +268,7 @@ struct tss_struct {
9976
9977 } ____cacheline_aligned;
9978
9979 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9980 +extern struct tss_struct init_tss[NR_CPUS];
9981
9982 /*
9983 * Save the original ist values for checking stack pointers during debugging
9984 @@ -861,11 +861,18 @@ static inline void spin_lock_prefetch(const void *x)
9985 */
9986 #define TASK_SIZE PAGE_OFFSET
9987 #define TASK_SIZE_MAX TASK_SIZE
9988 +
9989 +#ifdef CONFIG_PAX_SEGMEXEC
9990 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9991 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9992 +#else
9993 #define STACK_TOP TASK_SIZE
9994 -#define STACK_TOP_MAX STACK_TOP
9995 +#endif
9996 +
9997 +#define STACK_TOP_MAX TASK_SIZE
9998
9999 #define INIT_THREAD { \
10000 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
10001 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10002 .vm86_info = NULL, \
10003 .sysenter_cs = __KERNEL_CS, \
10004 .io_bitmap_ptr = NULL, \
10005 @@ -879,7 +886,7 @@ static inline void spin_lock_prefetch(const void *x)
10006 */
10007 #define INIT_TSS { \
10008 .x86_tss = { \
10009 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
10010 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10011 .ss0 = __KERNEL_DS, \
10012 .ss1 = __KERNEL_CS, \
10013 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
10014 @@ -890,11 +897,7 @@ static inline void spin_lock_prefetch(const void *x)
10015 extern unsigned long thread_saved_pc(struct task_struct *tsk);
10016
10017 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
10018 -#define KSTK_TOP(info) \
10019 -({ \
10020 - unsigned long *__ptr = (unsigned long *)(info); \
10021 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
10022 -})
10023 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
10024
10025 /*
10026 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
10027 @@ -909,7 +912,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10028 #define task_pt_regs(task) \
10029 ({ \
10030 struct pt_regs *__regs__; \
10031 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
10032 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
10033 __regs__ - 1; \
10034 })
10035
10036 @@ -919,13 +922,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10037 /*
10038 * User space process size. 47bits minus one guard page.
10039 */
10040 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
10041 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
10042
10043 /* This decides where the kernel will search for a free chunk of vm
10044 * space during mmap's.
10045 */
10046 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
10047 - 0xc0000000 : 0xFFFFe000)
10048 + 0xc0000000 : 0xFFFFf000)
10049
10050 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
10051 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
10052 @@ -936,11 +939,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10053 #define STACK_TOP_MAX TASK_SIZE_MAX
10054
10055 #define INIT_THREAD { \
10056 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10057 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10058 }
10059
10060 #define INIT_TSS { \
10061 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10062 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10063 }
10064
10065 /*
10066 @@ -962,6 +965,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
10067 */
10068 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
10069
10070 +#ifdef CONFIG_PAX_SEGMEXEC
10071 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
10072 +#endif
10073 +
10074 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
10075
10076 /* Get/set a process' ability to use the timestamp counter instruction */
10077 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
10078 index 3566454..4bdfb8c 100644
10079 --- a/arch/x86/include/asm/ptrace.h
10080 +++ b/arch/x86/include/asm/ptrace.h
10081 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
10082 }
10083
10084 /*
10085 - * user_mode_vm(regs) determines whether a register set came from user mode.
10086 + * user_mode(regs) determines whether a register set came from user mode.
10087 * This is true if V8086 mode was enabled OR if the register set was from
10088 * protected mode with RPL-3 CS value. This tricky test checks that with
10089 * one comparison. Many places in the kernel can bypass this full check
10090 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
10091 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
10092 + * be used.
10093 */
10094 -static inline int user_mode(struct pt_regs *regs)
10095 +static inline int user_mode_novm(struct pt_regs *regs)
10096 {
10097 #ifdef CONFIG_X86_32
10098 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
10099 #else
10100 - return !!(regs->cs & 3);
10101 + return !!(regs->cs & SEGMENT_RPL_MASK);
10102 #endif
10103 }
10104
10105 -static inline int user_mode_vm(struct pt_regs *regs)
10106 +static inline int user_mode(struct pt_regs *regs)
10107 {
10108 #ifdef CONFIG_X86_32
10109 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
10110 USER_RPL;
10111 #else
10112 - return user_mode(regs);
10113 + return user_mode_novm(regs);
10114 #endif
10115 }
10116
10117 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
10118 #ifdef CONFIG_X86_64
10119 static inline bool user_64bit_mode(struct pt_regs *regs)
10120 {
10121 + unsigned long cs = regs->cs & 0xffff;
10122 #ifndef CONFIG_PARAVIRT
10123 /*
10124 * On non-paravirt systems, this is the only long mode CPL 3
10125 * selector. We do not allow long mode selectors in the LDT.
10126 */
10127 - return regs->cs == __USER_CS;
10128 + return cs == __USER_CS;
10129 #else
10130 /* Headers are too twisted for this to go in paravirt.h. */
10131 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
10132 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
10133 #endif
10134 }
10135 #endif
10136 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
10137 index 92f29706..a79cbbb 100644
10138 --- a/arch/x86/include/asm/reboot.h
10139 +++ b/arch/x86/include/asm/reboot.h
10140 @@ -6,19 +6,19 @@
10141 struct pt_regs;
10142
10143 struct machine_ops {
10144 - void (*restart)(char *cmd);
10145 - void (*halt)(void);
10146 - void (*power_off)(void);
10147 + void (* __noreturn restart)(char *cmd);
10148 + void (* __noreturn halt)(void);
10149 + void (* __noreturn power_off)(void);
10150 void (*shutdown)(void);
10151 void (*crash_shutdown)(struct pt_regs *);
10152 - void (*emergency_restart)(void);
10153 -};
10154 + void (* __noreturn emergency_restart)(void);
10155 +} __no_const;
10156
10157 extern struct machine_ops machine_ops;
10158
10159 void native_machine_crash_shutdown(struct pt_regs *regs);
10160 void native_machine_shutdown(void);
10161 -void machine_real_restart(unsigned int type);
10162 +void machine_real_restart(unsigned int type) __noreturn;
10163 /* These must match dispatch_table in reboot_32.S */
10164 #define MRR_BIOS 0
10165 #define MRR_APM 1
10166 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
10167 index 2dbe4a7..ce1db00 100644
10168 --- a/arch/x86/include/asm/rwsem.h
10169 +++ b/arch/x86/include/asm/rwsem.h
10170 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
10171 {
10172 asm volatile("# beginning down_read\n\t"
10173 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10174 +
10175 +#ifdef CONFIG_PAX_REFCOUNT
10176 + "jno 0f\n"
10177 + LOCK_PREFIX _ASM_DEC "(%1)\n"
10178 + "int $4\n0:\n"
10179 + _ASM_EXTABLE(0b, 0b)
10180 +#endif
10181 +
10182 /* adds 0x00000001 */
10183 " jns 1f\n"
10184 " call call_rwsem_down_read_failed\n"
10185 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
10186 "1:\n\t"
10187 " mov %1,%2\n\t"
10188 " add %3,%2\n\t"
10189 +
10190 +#ifdef CONFIG_PAX_REFCOUNT
10191 + "jno 0f\n"
10192 + "sub %3,%2\n"
10193 + "int $4\n0:\n"
10194 + _ASM_EXTABLE(0b, 0b)
10195 +#endif
10196 +
10197 " jle 2f\n\t"
10198 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10199 " jnz 1b\n\t"
10200 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
10201 long tmp;
10202 asm volatile("# beginning down_write\n\t"
10203 LOCK_PREFIX " xadd %1,(%2)\n\t"
10204 +
10205 +#ifdef CONFIG_PAX_REFCOUNT
10206 + "jno 0f\n"
10207 + "mov %1,(%2)\n"
10208 + "int $4\n0:\n"
10209 + _ASM_EXTABLE(0b, 0b)
10210 +#endif
10211 +
10212 /* adds 0xffff0001, returns the old value */
10213 " test %1,%1\n\t"
10214 /* was the count 0 before? */
10215 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
10216 long tmp;
10217 asm volatile("# beginning __up_read\n\t"
10218 LOCK_PREFIX " xadd %1,(%2)\n\t"
10219 +
10220 +#ifdef CONFIG_PAX_REFCOUNT
10221 + "jno 0f\n"
10222 + "mov %1,(%2)\n"
10223 + "int $4\n0:\n"
10224 + _ASM_EXTABLE(0b, 0b)
10225 +#endif
10226 +
10227 /* subtracts 1, returns the old value */
10228 " jns 1f\n\t"
10229 " call call_rwsem_wake\n" /* expects old value in %edx */
10230 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
10231 long tmp;
10232 asm volatile("# beginning __up_write\n\t"
10233 LOCK_PREFIX " xadd %1,(%2)\n\t"
10234 +
10235 +#ifdef CONFIG_PAX_REFCOUNT
10236 + "jno 0f\n"
10237 + "mov %1,(%2)\n"
10238 + "int $4\n0:\n"
10239 + _ASM_EXTABLE(0b, 0b)
10240 +#endif
10241 +
10242 /* subtracts 0xffff0001, returns the old value */
10243 " jns 1f\n\t"
10244 " call call_rwsem_wake\n" /* expects old value in %edx */
10245 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10246 {
10247 asm volatile("# beginning __downgrade_write\n\t"
10248 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10249 +
10250 +#ifdef CONFIG_PAX_REFCOUNT
10251 + "jno 0f\n"
10252 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10253 + "int $4\n0:\n"
10254 + _ASM_EXTABLE(0b, 0b)
10255 +#endif
10256 +
10257 /*
10258 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10259 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10260 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10261 */
10262 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10263 {
10264 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10265 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10266 +
10267 +#ifdef CONFIG_PAX_REFCOUNT
10268 + "jno 0f\n"
10269 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10270 + "int $4\n0:\n"
10271 + _ASM_EXTABLE(0b, 0b)
10272 +#endif
10273 +
10274 : "+m" (sem->count)
10275 : "er" (delta));
10276 }
10277 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10278 */
10279 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
10280 {
10281 - return delta + xadd(&sem->count, delta);
10282 + return delta + xadd_check_overflow(&sem->count, delta);
10283 }
10284
10285 #endif /* __KERNEL__ */
10286 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
10287 index 5e64171..f58957e 100644
10288 --- a/arch/x86/include/asm/segment.h
10289 +++ b/arch/x86/include/asm/segment.h
10290 @@ -64,10 +64,15 @@
10291 * 26 - ESPFIX small SS
10292 * 27 - per-cpu [ offset to per-cpu data area ]
10293 * 28 - stack_canary-20 [ for stack protector ]
10294 - * 29 - unused
10295 - * 30 - unused
10296 + * 29 - PCI BIOS CS
10297 + * 30 - PCI BIOS DS
10298 * 31 - TSS for double fault handler
10299 */
10300 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
10301 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
10302 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
10303 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
10304 +
10305 #define GDT_ENTRY_TLS_MIN 6
10306 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
10307
10308 @@ -79,6 +84,8 @@
10309
10310 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
10311
10312 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10313 +
10314 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
10315
10316 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
10317 @@ -104,6 +111,12 @@
10318 #define __KERNEL_STACK_CANARY 0
10319 #endif
10320
10321 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
10322 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10323 +
10324 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
10325 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10326 +
10327 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10328
10329 /*
10330 @@ -141,7 +154,7 @@
10331 */
10332
10333 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10334 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10335 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10336
10337
10338 #else
10339 @@ -165,6 +178,8 @@
10340 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10341 #define __USER32_DS __USER_DS
10342
10343 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10344 +
10345 #define GDT_ENTRY_TSS 8 /* needs two entries */
10346 #define GDT_ENTRY_LDT 10 /* needs two entries */
10347 #define GDT_ENTRY_TLS_MIN 12
10348 @@ -185,6 +200,7 @@
10349 #endif
10350
10351 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10352 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10353 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10354 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10355 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10356 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10357 index 73b11bc..d4a3b63 100644
10358 --- a/arch/x86/include/asm/smp.h
10359 +++ b/arch/x86/include/asm/smp.h
10360 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10361 /* cpus sharing the last level cache: */
10362 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10363 DECLARE_PER_CPU(u16, cpu_llc_id);
10364 -DECLARE_PER_CPU(int, cpu_number);
10365 +DECLARE_PER_CPU(unsigned int, cpu_number);
10366
10367 static inline struct cpumask *cpu_sibling_mask(int cpu)
10368 {
10369 @@ -77,7 +77,7 @@ struct smp_ops {
10370
10371 void (*send_call_func_ipi)(const struct cpumask *mask);
10372 void (*send_call_func_single_ipi)(int cpu);
10373 -};
10374 +} __no_const;
10375
10376 /* Globals due to paravirt */
10377 extern void set_cpu_sibling_map(int cpu);
10378 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10379 extern int safe_smp_processor_id(void);
10380
10381 #elif defined(CONFIG_X86_64_SMP)
10382 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10383 -
10384 -#define stack_smp_processor_id() \
10385 -({ \
10386 - struct thread_info *ti; \
10387 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10388 - ti->cpu; \
10389 -})
10390 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10391 +#define stack_smp_processor_id() raw_smp_processor_id()
10392 #define safe_smp_processor_id() smp_processor_id()
10393
10394 #endif
10395 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10396 index 972c260..43ab1fd 100644
10397 --- a/arch/x86/include/asm/spinlock.h
10398 +++ b/arch/x86/include/asm/spinlock.h
10399 @@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10400 static inline void arch_read_lock(arch_rwlock_t *rw)
10401 {
10402 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10403 +
10404 +#ifdef CONFIG_PAX_REFCOUNT
10405 + "jno 0f\n"
10406 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10407 + "int $4\n0:\n"
10408 + _ASM_EXTABLE(0b, 0b)
10409 +#endif
10410 +
10411 "jns 1f\n"
10412 "call __read_lock_failed\n\t"
10413 "1:\n"
10414 @@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10415 static inline void arch_write_lock(arch_rwlock_t *rw)
10416 {
10417 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10418 +
10419 +#ifdef CONFIG_PAX_REFCOUNT
10420 + "jno 0f\n"
10421 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10422 + "int $4\n0:\n"
10423 + _ASM_EXTABLE(0b, 0b)
10424 +#endif
10425 +
10426 "jz 1f\n"
10427 "call __write_lock_failed\n\t"
10428 "1:\n"
10429 @@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10430
10431 static inline void arch_read_unlock(arch_rwlock_t *rw)
10432 {
10433 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10434 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10435 +
10436 +#ifdef CONFIG_PAX_REFCOUNT
10437 + "jno 0f\n"
10438 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10439 + "int $4\n0:\n"
10440 + _ASM_EXTABLE(0b, 0b)
10441 +#endif
10442 +
10443 :"+m" (rw->lock) : : "memory");
10444 }
10445
10446 static inline void arch_write_unlock(arch_rwlock_t *rw)
10447 {
10448 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10449 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10450 +
10451 +#ifdef CONFIG_PAX_REFCOUNT
10452 + "jno 0f\n"
10453 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10454 + "int $4\n0:\n"
10455 + _ASM_EXTABLE(0b, 0b)
10456 +#endif
10457 +
10458 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10459 }
10460
10461 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10462 index 1575177..cb23f52 100644
10463 --- a/arch/x86/include/asm/stackprotector.h
10464 +++ b/arch/x86/include/asm/stackprotector.h
10465 @@ -48,7 +48,7 @@
10466 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10467 */
10468 #define GDT_STACK_CANARY_INIT \
10469 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10470 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10471
10472 /*
10473 * Initialize the stackprotector canary value.
10474 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10475
10476 static inline void load_stack_canary_segment(void)
10477 {
10478 -#ifdef CONFIG_X86_32
10479 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10480 asm volatile ("mov %0, %%gs" : : "r" (0));
10481 #endif
10482 }
10483 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10484 index 70bbe39..4ae2bd4 100644
10485 --- a/arch/x86/include/asm/stacktrace.h
10486 +++ b/arch/x86/include/asm/stacktrace.h
10487 @@ -11,28 +11,20 @@
10488
10489 extern int kstack_depth_to_print;
10490
10491 -struct thread_info;
10492 +struct task_struct;
10493 struct stacktrace_ops;
10494
10495 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10496 - unsigned long *stack,
10497 - unsigned long bp,
10498 - const struct stacktrace_ops *ops,
10499 - void *data,
10500 - unsigned long *end,
10501 - int *graph);
10502 +typedef unsigned long walk_stack_t(struct task_struct *task,
10503 + void *stack_start,
10504 + unsigned long *stack,
10505 + unsigned long bp,
10506 + const struct stacktrace_ops *ops,
10507 + void *data,
10508 + unsigned long *end,
10509 + int *graph);
10510
10511 -extern unsigned long
10512 -print_context_stack(struct thread_info *tinfo,
10513 - unsigned long *stack, unsigned long bp,
10514 - const struct stacktrace_ops *ops, void *data,
10515 - unsigned long *end, int *graph);
10516 -
10517 -extern unsigned long
10518 -print_context_stack_bp(struct thread_info *tinfo,
10519 - unsigned long *stack, unsigned long bp,
10520 - const struct stacktrace_ops *ops, void *data,
10521 - unsigned long *end, int *graph);
10522 +extern walk_stack_t print_context_stack;
10523 +extern walk_stack_t print_context_stack_bp;
10524
10525 /* Generic stack tracer with callbacks */
10526
10527 @@ -40,7 +32,7 @@ struct stacktrace_ops {
10528 void (*address)(void *data, unsigned long address, int reliable);
10529 /* On negative return stop dumping */
10530 int (*stack)(void *data, char *name);
10531 - walk_stack_t walk_stack;
10532 + walk_stack_t *walk_stack;
10533 };
10534
10535 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10536 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10537 index cb23852..2dde194 100644
10538 --- a/arch/x86/include/asm/sys_ia32.h
10539 +++ b/arch/x86/include/asm/sys_ia32.h
10540 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10541 compat_sigset_t __user *, unsigned int);
10542 asmlinkage long sys32_alarm(unsigned int);
10543
10544 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10545 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10546 asmlinkage long sys32_sysfs(int, u32, u32);
10547
10548 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10549 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10550 index 2d2f01c..f985723 100644
10551 --- a/arch/x86/include/asm/system.h
10552 +++ b/arch/x86/include/asm/system.h
10553 @@ -129,7 +129,7 @@ do { \
10554 "call __switch_to\n\t" \
10555 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10556 __switch_canary \
10557 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10558 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10559 "movq %%rax,%%rdi\n\t" \
10560 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10561 "jnz ret_from_fork\n\t" \
10562 @@ -140,7 +140,7 @@ do { \
10563 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10564 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10565 [_tif_fork] "i" (_TIF_FORK), \
10566 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10567 + [thread_info] "m" (current_tinfo), \
10568 [current_task] "m" (current_task) \
10569 __switch_canary_iparam \
10570 : "memory", "cc" __EXTRA_CLOBBER)
10571 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10572 {
10573 unsigned long __limit;
10574 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10575 - return __limit + 1;
10576 + return __limit;
10577 }
10578
10579 static inline void native_clts(void)
10580 @@ -397,13 +397,13 @@ void enable_hlt(void);
10581
10582 void cpu_idle_wait(void);
10583
10584 -extern unsigned long arch_align_stack(unsigned long sp);
10585 +#define arch_align_stack(x) ((x) & ~0xfUL)
10586 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10587
10588 void default_idle(void);
10589 bool set_pm_idle_to_default(void);
10590
10591 -void stop_this_cpu(void *dummy);
10592 +void stop_this_cpu(void *dummy) __noreturn;
10593
10594 /*
10595 * Force strict CPU ordering.
10596 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10597 index d7ef849..6af292e 100644
10598 --- a/arch/x86/include/asm/thread_info.h
10599 +++ b/arch/x86/include/asm/thread_info.h
10600 @@ -10,6 +10,7 @@
10601 #include <linux/compiler.h>
10602 #include <asm/page.h>
10603 #include <asm/types.h>
10604 +#include <asm/percpu.h>
10605
10606 /*
10607 * low level task data that entry.S needs immediate access to
10608 @@ -24,7 +25,6 @@ struct exec_domain;
10609 #include <linux/atomic.h>
10610
10611 struct thread_info {
10612 - struct task_struct *task; /* main task structure */
10613 struct exec_domain *exec_domain; /* execution domain */
10614 __u32 flags; /* low level flags */
10615 __u32 status; /* thread synchronous flags */
10616 @@ -34,18 +34,12 @@ struct thread_info {
10617 mm_segment_t addr_limit;
10618 struct restart_block restart_block;
10619 void __user *sysenter_return;
10620 -#ifdef CONFIG_X86_32
10621 - unsigned long previous_esp; /* ESP of the previous stack in
10622 - case of nested (IRQ) stacks
10623 - */
10624 - __u8 supervisor_stack[0];
10625 -#endif
10626 + unsigned long lowest_stack;
10627 int uaccess_err;
10628 };
10629
10630 -#define INIT_THREAD_INFO(tsk) \
10631 +#define INIT_THREAD_INFO \
10632 { \
10633 - .task = &tsk, \
10634 .exec_domain = &default_exec_domain, \
10635 .flags = 0, \
10636 .cpu = 0, \
10637 @@ -56,7 +50,7 @@ struct thread_info {
10638 }, \
10639 }
10640
10641 -#define init_thread_info (init_thread_union.thread_info)
10642 +#define init_thread_info (init_thread_union.stack)
10643 #define init_stack (init_thread_union.stack)
10644
10645 #else /* !__ASSEMBLY__ */
10646 @@ -170,45 +164,40 @@ struct thread_info {
10647 ret; \
10648 })
10649
10650 -#ifdef CONFIG_X86_32
10651 -
10652 -#define STACK_WARN (THREAD_SIZE/8)
10653 -/*
10654 - * macros/functions for gaining access to the thread information structure
10655 - *
10656 - * preempt_count needs to be 1 initially, until the scheduler is functional.
10657 - */
10658 -#ifndef __ASSEMBLY__
10659 -
10660 -
10661 -/* how to get the current stack pointer from C */
10662 -register unsigned long current_stack_pointer asm("esp") __used;
10663 -
10664 -/* how to get the thread information struct from C */
10665 -static inline struct thread_info *current_thread_info(void)
10666 -{
10667 - return (struct thread_info *)
10668 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10669 -}
10670 -
10671 -#else /* !__ASSEMBLY__ */
10672 -
10673 +#ifdef __ASSEMBLY__
10674 /* how to get the thread information struct from ASM */
10675 #define GET_THREAD_INFO(reg) \
10676 - movl $-THREAD_SIZE, reg; \
10677 - andl %esp, reg
10678 + mov PER_CPU_VAR(current_tinfo), reg
10679
10680 /* use this one if reg already contains %esp */
10681 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10682 - andl $-THREAD_SIZE, reg
10683 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10684 +#else
10685 +/* how to get the thread information struct from C */
10686 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10687 +
10688 +static __always_inline struct thread_info *current_thread_info(void)
10689 +{
10690 + return percpu_read_stable(current_tinfo);
10691 +}
10692 +#endif
10693 +
10694 +#ifdef CONFIG_X86_32
10695 +
10696 +#define STACK_WARN (THREAD_SIZE/8)
10697 +/*
10698 + * macros/functions for gaining access to the thread information structure
10699 + *
10700 + * preempt_count needs to be 1 initially, until the scheduler is functional.
10701 + */
10702 +#ifndef __ASSEMBLY__
10703 +
10704 +/* how to get the current stack pointer from C */
10705 +register unsigned long current_stack_pointer asm("esp") __used;
10706
10707 #endif
10708
10709 #else /* X86_32 */
10710
10711 -#include <asm/percpu.h>
10712 -#define KERNEL_STACK_OFFSET (5*8)
10713 -
10714 /*
10715 * macros/functions for gaining access to the thread information structure
10716 * preempt_count needs to be 1 initially, until the scheduler is functional.
10717 @@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10718 #ifndef __ASSEMBLY__
10719 DECLARE_PER_CPU(unsigned long, kernel_stack);
10720
10721 -static inline struct thread_info *current_thread_info(void)
10722 -{
10723 - struct thread_info *ti;
10724 - ti = (void *)(percpu_read_stable(kernel_stack) +
10725 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10726 - return ti;
10727 -}
10728 -
10729 -#else /* !__ASSEMBLY__ */
10730 -
10731 -/* how to get the thread information struct from ASM */
10732 -#define GET_THREAD_INFO(reg) \
10733 - movq PER_CPU_VAR(kernel_stack),reg ; \
10734 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10735 -
10736 +/* how to get the current stack pointer from C */
10737 +register unsigned long current_stack_pointer asm("rsp") __used;
10738 #endif
10739
10740 #endif /* !X86_32 */
10741 @@ -264,5 +240,16 @@ extern void arch_task_cache_init(void);
10742 extern void free_thread_info(struct thread_info *ti);
10743 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10744 #define arch_task_cache_init arch_task_cache_init
10745 +
10746 +#define __HAVE_THREAD_FUNCTIONS
10747 +#define task_thread_info(task) (&(task)->tinfo)
10748 +#define task_stack_page(task) ((task)->stack)
10749 +#define setup_thread_stack(p, org) do {} while (0)
10750 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10751 +
10752 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10753 +extern struct task_struct *alloc_task_struct_node(int node);
10754 +extern void free_task_struct(struct task_struct *);
10755 +
10756 #endif
10757 #endif /* _ASM_X86_THREAD_INFO_H */
10758 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10759 index 36361bf..324f262 100644
10760 --- a/arch/x86/include/asm/uaccess.h
10761 +++ b/arch/x86/include/asm/uaccess.h
10762 @@ -7,12 +7,15 @@
10763 #include <linux/compiler.h>
10764 #include <linux/thread_info.h>
10765 #include <linux/string.h>
10766 +#include <linux/sched.h>
10767 #include <asm/asm.h>
10768 #include <asm/page.h>
10769
10770 #define VERIFY_READ 0
10771 #define VERIFY_WRITE 1
10772
10773 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10774 +
10775 /*
10776 * The fs value determines whether argument validity checking should be
10777 * performed or not. If get_fs() == USER_DS, checking is performed, with
10778 @@ -28,7 +31,12 @@
10779
10780 #define get_ds() (KERNEL_DS)
10781 #define get_fs() (current_thread_info()->addr_limit)
10782 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10783 +void __set_fs(mm_segment_t x);
10784 +void set_fs(mm_segment_t x);
10785 +#else
10786 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10787 +#endif
10788
10789 #define segment_eq(a, b) ((a).seg == (b).seg)
10790
10791 @@ -76,7 +84,33 @@
10792 * checks that the pointer is in the user space range - after calling
10793 * this function, memory access functions may still return -EFAULT.
10794 */
10795 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10796 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10797 +#define access_ok(type, addr, size) \
10798 +({ \
10799 + long __size = size; \
10800 + unsigned long __addr = (unsigned long)addr; \
10801 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10802 + unsigned long __end_ao = __addr + __size - 1; \
10803 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10804 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10805 + while(__addr_ao <= __end_ao) { \
10806 + char __c_ao; \
10807 + __addr_ao += PAGE_SIZE; \
10808 + if (__size > PAGE_SIZE) \
10809 + cond_resched(); \
10810 + if (__get_user(__c_ao, (char __user *)__addr)) \
10811 + break; \
10812 + if (type != VERIFY_WRITE) { \
10813 + __addr = __addr_ao; \
10814 + continue; \
10815 + } \
10816 + if (__put_user(__c_ao, (char __user *)__addr)) \
10817 + break; \
10818 + __addr = __addr_ao; \
10819 + } \
10820 + } \
10821 + __ret_ao; \
10822 +})
10823
10824 /*
10825 * The exception table consists of pairs of addresses: the first is the
10826 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10827 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10828 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10829
10830 -
10831 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10832 +#define __copyuser_seg "gs;"
10833 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10834 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10835 +#else
10836 +#define __copyuser_seg
10837 +#define __COPYUSER_SET_ES
10838 +#define __COPYUSER_RESTORE_ES
10839 +#endif
10840
10841 #ifdef CONFIG_X86_32
10842 #define __put_user_asm_u64(x, addr, err, errret) \
10843 - asm volatile("1: movl %%eax,0(%2)\n" \
10844 - "2: movl %%edx,4(%2)\n" \
10845 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10846 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10847 "3:\n" \
10848 ".section .fixup,\"ax\"\n" \
10849 "4: movl %3,%0\n" \
10850 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10851 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10852
10853 #define __put_user_asm_ex_u64(x, addr) \
10854 - asm volatile("1: movl %%eax,0(%1)\n" \
10855 - "2: movl %%edx,4(%1)\n" \
10856 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10857 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10858 "3:\n" \
10859 _ASM_EXTABLE(1b, 2b - 1b) \
10860 _ASM_EXTABLE(2b, 3b - 2b) \
10861 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
10862 __typeof__(*(ptr)) __pu_val; \
10863 __chk_user_ptr(ptr); \
10864 might_fault(); \
10865 - __pu_val = x; \
10866 + __pu_val = (x); \
10867 switch (sizeof(*(ptr))) { \
10868 case 1: \
10869 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10870 @@ -373,7 +415,7 @@ do { \
10871 } while (0)
10872
10873 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10874 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10875 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10876 "2:\n" \
10877 ".section .fixup,\"ax\"\n" \
10878 "3: mov %3,%0\n" \
10879 @@ -381,7 +423,7 @@ do { \
10880 " jmp 2b\n" \
10881 ".previous\n" \
10882 _ASM_EXTABLE(1b, 3b) \
10883 - : "=r" (err), ltype(x) \
10884 + : "=r" (err), ltype (x) \
10885 : "m" (__m(addr)), "i" (errret), "0" (err))
10886
10887 #define __get_user_size_ex(x, ptr, size) \
10888 @@ -406,7 +448,7 @@ do { \
10889 } while (0)
10890
10891 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10892 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10893 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10894 "2:\n" \
10895 _ASM_EXTABLE(1b, 2b - 1b) \
10896 : ltype(x) : "m" (__m(addr)))
10897 @@ -423,13 +465,24 @@ do { \
10898 int __gu_err; \
10899 unsigned long __gu_val; \
10900 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10901 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10902 + (x) = (__typeof__(*(ptr)))__gu_val; \
10903 __gu_err; \
10904 })
10905
10906 /* FIXME: this hack is definitely wrong -AK */
10907 struct __large_struct { unsigned long buf[100]; };
10908 -#define __m(x) (*(struct __large_struct __user *)(x))
10909 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10910 +#define ____m(x) \
10911 +({ \
10912 + unsigned long ____x = (unsigned long)(x); \
10913 + if (____x < PAX_USER_SHADOW_BASE) \
10914 + ____x += PAX_USER_SHADOW_BASE; \
10915 + (void __user *)____x; \
10916 +})
10917 +#else
10918 +#define ____m(x) (x)
10919 +#endif
10920 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10921
10922 /*
10923 * Tell gcc we read from memory instead of writing: this is because
10924 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10925 * aliasing issues.
10926 */
10927 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10928 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10929 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10930 "2:\n" \
10931 ".section .fixup,\"ax\"\n" \
10932 "3: mov %3,%0\n" \
10933 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10934 ".previous\n" \
10935 _ASM_EXTABLE(1b, 3b) \
10936 : "=r"(err) \
10937 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10938 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10939
10940 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10941 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10942 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10943 "2:\n" \
10944 _ASM_EXTABLE(1b, 2b - 1b) \
10945 : : ltype(x), "m" (__m(addr)))
10946 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10947 * On error, the variable @x is set to zero.
10948 */
10949
10950 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10951 +#define __get_user(x, ptr) get_user((x), (ptr))
10952 +#else
10953 #define __get_user(x, ptr) \
10954 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10955 +#endif
10956
10957 /**
10958 * __put_user: - Write a simple value into user space, with less checking.
10959 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10960 * Returns zero on success, or -EFAULT on error.
10961 */
10962
10963 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10964 +#define __put_user(x, ptr) put_user((x), (ptr))
10965 +#else
10966 #define __put_user(x, ptr) \
10967 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10968 +#endif
10969
10970 #define __get_user_unaligned __get_user
10971 #define __put_user_unaligned __put_user
10972 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10973 #define get_user_ex(x, ptr) do { \
10974 unsigned long __gue_val; \
10975 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10976 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10977 + (x) = (__typeof__(*(ptr)))__gue_val; \
10978 } while (0)
10979
10980 #ifdef CONFIG_X86_WP_WORKS_OK
10981 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10982 index 566e803..b9521e9 100644
10983 --- a/arch/x86/include/asm/uaccess_32.h
10984 +++ b/arch/x86/include/asm/uaccess_32.h
10985 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10986 static __always_inline unsigned long __must_check
10987 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10988 {
10989 + if ((long)n < 0)
10990 + return n;
10991 +
10992 if (__builtin_constant_p(n)) {
10993 unsigned long ret;
10994
10995 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10996 return ret;
10997 }
10998 }
10999 + if (!__builtin_constant_p(n))
11000 + check_object_size(from, n, true);
11001 return __copy_to_user_ll(to, from, n);
11002 }
11003
11004 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
11005 __copy_to_user(void __user *to, const void *from, unsigned long n)
11006 {
11007 might_fault();
11008 +
11009 return __copy_to_user_inatomic(to, from, n);
11010 }
11011
11012 static __always_inline unsigned long
11013 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
11014 {
11015 + if ((long)n < 0)
11016 + return n;
11017 +
11018 /* Avoid zeroing the tail if the copy fails..
11019 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
11020 * but as the zeroing behaviour is only significant when n is not
11021 @@ -137,6 +146,10 @@ static __always_inline unsigned long
11022 __copy_from_user(void *to, const void __user *from, unsigned long n)
11023 {
11024 might_fault();
11025 +
11026 + if ((long)n < 0)
11027 + return n;
11028 +
11029 if (__builtin_constant_p(n)) {
11030 unsigned long ret;
11031
11032 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
11033 return ret;
11034 }
11035 }
11036 + if (!__builtin_constant_p(n))
11037 + check_object_size(to, n, false);
11038 return __copy_from_user_ll(to, from, n);
11039 }
11040
11041 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
11042 const void __user *from, unsigned long n)
11043 {
11044 might_fault();
11045 +
11046 + if ((long)n < 0)
11047 + return n;
11048 +
11049 if (__builtin_constant_p(n)) {
11050 unsigned long ret;
11051
11052 @@ -181,15 +200,19 @@ static __always_inline unsigned long
11053 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
11054 unsigned long n)
11055 {
11056 - return __copy_from_user_ll_nocache_nozero(to, from, n);
11057 + if ((long)n < 0)
11058 + return n;
11059 +
11060 + return __copy_from_user_ll_nocache_nozero(to, from, n);
11061 }
11062
11063 -unsigned long __must_check copy_to_user(void __user *to,
11064 - const void *from, unsigned long n);
11065 -unsigned long __must_check _copy_from_user(void *to,
11066 - const void __user *from,
11067 - unsigned long n);
11068 -
11069 +extern void copy_to_user_overflow(void)
11070 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
11071 + __compiletime_error("copy_to_user() buffer size is not provably correct")
11072 +#else
11073 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
11074 +#endif
11075 +;
11076
11077 extern void copy_from_user_overflow(void)
11078 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
11079 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
11080 #endif
11081 ;
11082
11083 -static inline unsigned long __must_check copy_from_user(void *to,
11084 - const void __user *from,
11085 - unsigned long n)
11086 +/**
11087 + * copy_to_user: - Copy a block of data into user space.
11088 + * @to: Destination address, in user space.
11089 + * @from: Source address, in kernel space.
11090 + * @n: Number of bytes to copy.
11091 + *
11092 + * Context: User context only. This function may sleep.
11093 + *
11094 + * Copy data from kernel space to user space.
11095 + *
11096 + * Returns number of bytes that could not be copied.
11097 + * On success, this will be zero.
11098 + */
11099 +static inline unsigned long __must_check
11100 +copy_to_user(void __user *to, const void *from, unsigned long n)
11101 +{
11102 + int sz = __compiletime_object_size(from);
11103 +
11104 + if (unlikely(sz != -1 && sz < n))
11105 + copy_to_user_overflow();
11106 + else if (access_ok(VERIFY_WRITE, to, n))
11107 + n = __copy_to_user(to, from, n);
11108 + return n;
11109 +}
11110 +
11111 +/**
11112 + * copy_from_user: - Copy a block of data from user space.
11113 + * @to: Destination address, in kernel space.
11114 + * @from: Source address, in user space.
11115 + * @n: Number of bytes to copy.
11116 + *
11117 + * Context: User context only. This function may sleep.
11118 + *
11119 + * Copy data from user space to kernel space.
11120 + *
11121 + * Returns number of bytes that could not be copied.
11122 + * On success, this will be zero.
11123 + *
11124 + * If some data could not be copied, this function will pad the copied
11125 + * data to the requested size using zero bytes.
11126 + */
11127 +static inline unsigned long __must_check
11128 +copy_from_user(void *to, const void __user *from, unsigned long n)
11129 {
11130 int sz = __compiletime_object_size(to);
11131
11132 - if (likely(sz == -1 || sz >= n))
11133 - n = _copy_from_user(to, from, n);
11134 - else
11135 + if (unlikely(sz != -1 && sz < n))
11136 copy_from_user_overflow();
11137 -
11138 + else if (access_ok(VERIFY_READ, from, n))
11139 + n = __copy_from_user(to, from, n);
11140 + else if ((long)n > 0) {
11141 + if (!__builtin_constant_p(n))
11142 + check_object_size(to, n, false);
11143 + memset(to, 0, n);
11144 + }
11145 return n;
11146 }
11147
11148 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
11149 index 1c66d30..e66922c 100644
11150 --- a/arch/x86/include/asm/uaccess_64.h
11151 +++ b/arch/x86/include/asm/uaccess_64.h
11152 @@ -10,6 +10,9 @@
11153 #include <asm/alternative.h>
11154 #include <asm/cpufeature.h>
11155 #include <asm/page.h>
11156 +#include <asm/pgtable.h>
11157 +
11158 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
11159
11160 /*
11161 * Copy To/From Userspace
11162 @@ -17,12 +20,12 @@
11163
11164 /* Handles exceptions in both to and from, but doesn't do access_ok */
11165 __must_check unsigned long
11166 -copy_user_generic_string(void *to, const void *from, unsigned len);
11167 +copy_user_generic_string(void *to, const void *from, unsigned long len);
11168 __must_check unsigned long
11169 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
11170 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
11171
11172 static __always_inline __must_check unsigned long
11173 -copy_user_generic(void *to, const void *from, unsigned len)
11174 +copy_user_generic(void *to, const void *from, unsigned long len)
11175 {
11176 unsigned ret;
11177
11178 @@ -32,142 +35,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
11179 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
11180 "=d" (len)),
11181 "1" (to), "2" (from), "3" (len)
11182 - : "memory", "rcx", "r8", "r9", "r10", "r11");
11183 + : "memory", "rcx", "r8", "r9", "r11");
11184 return ret;
11185 }
11186
11187 +static __always_inline __must_check unsigned long
11188 +__copy_to_user(void __user *to, const void *from, unsigned long len);
11189 +static __always_inline __must_check unsigned long
11190 +__copy_from_user(void *to, const void __user *from, unsigned long len);
11191 __must_check unsigned long
11192 -_copy_to_user(void __user *to, const void *from, unsigned len);
11193 -__must_check unsigned long
11194 -_copy_from_user(void *to, const void __user *from, unsigned len);
11195 -__must_check unsigned long
11196 -copy_in_user(void __user *to, const void __user *from, unsigned len);
11197 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
11198
11199 static inline unsigned long __must_check copy_from_user(void *to,
11200 const void __user *from,
11201 unsigned long n)
11202 {
11203 - int sz = __compiletime_object_size(to);
11204 -
11205 might_fault();
11206 - if (likely(sz == -1 || sz >= n))
11207 - n = _copy_from_user(to, from, n);
11208 -#ifdef CONFIG_DEBUG_VM
11209 - else
11210 - WARN(1, "Buffer overflow detected!\n");
11211 -#endif
11212 +
11213 + if (access_ok(VERIFY_READ, from, n))
11214 + n = __copy_from_user(to, from, n);
11215 + else if (n < INT_MAX) {
11216 + if (!__builtin_constant_p(n))
11217 + check_object_size(to, n, false);
11218 + memset(to, 0, n);
11219 + }
11220 return n;
11221 }
11222
11223 static __always_inline __must_check
11224 -int copy_to_user(void __user *dst, const void *src, unsigned size)
11225 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
11226 {
11227 might_fault();
11228
11229 - return _copy_to_user(dst, src, size);
11230 + if (access_ok(VERIFY_WRITE, dst, size))
11231 + size = __copy_to_user(dst, src, size);
11232 + return size;
11233 }
11234
11235 static __always_inline __must_check
11236 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
11237 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
11238 {
11239 - int ret = 0;
11240 + int sz = __compiletime_object_size(dst);
11241 + unsigned ret = 0;
11242
11243 might_fault();
11244 - if (!__builtin_constant_p(size))
11245 - return copy_user_generic(dst, (__force void *)src, size);
11246 +
11247 + if (size > INT_MAX)
11248 + return size;
11249 +
11250 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11251 + if (!__access_ok(VERIFY_READ, src, size))
11252 + return size;
11253 +#endif
11254 +
11255 + if (unlikely(sz != -1 && sz < size)) {
11256 +#ifdef CONFIG_DEBUG_VM
11257 + WARN(1, "Buffer overflow detected!\n");
11258 +#endif
11259 + return size;
11260 + }
11261 +
11262 + if (!__builtin_constant_p(size)) {
11263 + check_object_size(dst, size, false);
11264 +
11265 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11266 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11267 + src += PAX_USER_SHADOW_BASE;
11268 +#endif
11269 +
11270 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11271 + }
11272 switch (size) {
11273 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
11274 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
11275 ret, "b", "b", "=q", 1);
11276 return ret;
11277 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
11278 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
11279 ret, "w", "w", "=r", 2);
11280 return ret;
11281 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
11282 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
11283 ret, "l", "k", "=r", 4);
11284 return ret;
11285 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
11286 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11287 ret, "q", "", "=r", 8);
11288 return ret;
11289 case 10:
11290 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11291 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11292 ret, "q", "", "=r", 10);
11293 if (unlikely(ret))
11294 return ret;
11295 __get_user_asm(*(u16 *)(8 + (char *)dst),
11296 - (u16 __user *)(8 + (char __user *)src),
11297 + (const u16 __user *)(8 + (const char __user *)src),
11298 ret, "w", "w", "=r", 2);
11299 return ret;
11300 case 16:
11301 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11302 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11303 ret, "q", "", "=r", 16);
11304 if (unlikely(ret))
11305 return ret;
11306 __get_user_asm(*(u64 *)(8 + (char *)dst),
11307 - (u64 __user *)(8 + (char __user *)src),
11308 + (const u64 __user *)(8 + (const char __user *)src),
11309 ret, "q", "", "=r", 8);
11310 return ret;
11311 default:
11312 - return copy_user_generic(dst, (__force void *)src, size);
11313 +
11314 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11315 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11316 + src += PAX_USER_SHADOW_BASE;
11317 +#endif
11318 +
11319 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11320 }
11321 }
11322
11323 static __always_inline __must_check
11324 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
11325 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
11326 {
11327 - int ret = 0;
11328 + int sz = __compiletime_object_size(src);
11329 + unsigned ret = 0;
11330
11331 might_fault();
11332 - if (!__builtin_constant_p(size))
11333 - return copy_user_generic((__force void *)dst, src, size);
11334 +
11335 + if (size > INT_MAX)
11336 + return size;
11337 +
11338 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11339 + if (!__access_ok(VERIFY_WRITE, dst, size))
11340 + return size;
11341 +#endif
11342 +
11343 + if (unlikely(sz != -1 && sz < size)) {
11344 +#ifdef CONFIG_DEBUG_VM
11345 + WARN(1, "Buffer overflow detected!\n");
11346 +#endif
11347 + return size;
11348 + }
11349 +
11350 + if (!__builtin_constant_p(size)) {
11351 + check_object_size(src, size, true);
11352 +
11353 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11354 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11355 + dst += PAX_USER_SHADOW_BASE;
11356 +#endif
11357 +
11358 + return copy_user_generic((__force_kernel void *)dst, src, size);
11359 + }
11360 switch (size) {
11361 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11362 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11363 ret, "b", "b", "iq", 1);
11364 return ret;
11365 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11366 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11367 ret, "w", "w", "ir", 2);
11368 return ret;
11369 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11370 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11371 ret, "l", "k", "ir", 4);
11372 return ret;
11373 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11374 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11375 ret, "q", "", "er", 8);
11376 return ret;
11377 case 10:
11378 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11379 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11380 ret, "q", "", "er", 10);
11381 if (unlikely(ret))
11382 return ret;
11383 asm("":::"memory");
11384 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11385 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11386 ret, "w", "w", "ir", 2);
11387 return ret;
11388 case 16:
11389 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11390 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11391 ret, "q", "", "er", 16);
11392 if (unlikely(ret))
11393 return ret;
11394 asm("":::"memory");
11395 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11396 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11397 ret, "q", "", "er", 8);
11398 return ret;
11399 default:
11400 - return copy_user_generic((__force void *)dst, src, size);
11401 +
11402 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11403 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11404 + dst += PAX_USER_SHADOW_BASE;
11405 +#endif
11406 +
11407 + return copy_user_generic((__force_kernel void *)dst, src, size);
11408 }
11409 }
11410
11411 static __always_inline __must_check
11412 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11413 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11414 {
11415 - int ret = 0;
11416 + unsigned ret = 0;
11417
11418 might_fault();
11419 - if (!__builtin_constant_p(size))
11420 - return copy_user_generic((__force void *)dst,
11421 - (__force void *)src, size);
11422 +
11423 + if (size > INT_MAX)
11424 + return size;
11425 +
11426 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11427 + if (!__access_ok(VERIFY_READ, src, size))
11428 + return size;
11429 + if (!__access_ok(VERIFY_WRITE, dst, size))
11430 + return size;
11431 +#endif
11432 +
11433 + if (!__builtin_constant_p(size)) {
11434 +
11435 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11436 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11437 + src += PAX_USER_SHADOW_BASE;
11438 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11439 + dst += PAX_USER_SHADOW_BASE;
11440 +#endif
11441 +
11442 + return copy_user_generic((__force_kernel void *)dst,
11443 + (__force_kernel const void *)src, size);
11444 + }
11445 switch (size) {
11446 case 1: {
11447 u8 tmp;
11448 - __get_user_asm(tmp, (u8 __user *)src,
11449 + __get_user_asm(tmp, (const u8 __user *)src,
11450 ret, "b", "b", "=q", 1);
11451 if (likely(!ret))
11452 __put_user_asm(tmp, (u8 __user *)dst,
11453 @@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11454 }
11455 case 2: {
11456 u16 tmp;
11457 - __get_user_asm(tmp, (u16 __user *)src,
11458 + __get_user_asm(tmp, (const u16 __user *)src,
11459 ret, "w", "w", "=r", 2);
11460 if (likely(!ret))
11461 __put_user_asm(tmp, (u16 __user *)dst,
11462 @@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11463
11464 case 4: {
11465 u32 tmp;
11466 - __get_user_asm(tmp, (u32 __user *)src,
11467 + __get_user_asm(tmp, (const u32 __user *)src,
11468 ret, "l", "k", "=r", 4);
11469 if (likely(!ret))
11470 __put_user_asm(tmp, (u32 __user *)dst,
11471 @@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11472 }
11473 case 8: {
11474 u64 tmp;
11475 - __get_user_asm(tmp, (u64 __user *)src,
11476 + __get_user_asm(tmp, (const u64 __user *)src,
11477 ret, "q", "", "=r", 8);
11478 if (likely(!ret))
11479 __put_user_asm(tmp, (u64 __user *)dst,
11480 @@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11481 return ret;
11482 }
11483 default:
11484 - return copy_user_generic((__force void *)dst,
11485 - (__force void *)src, size);
11486 +
11487 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11488 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11489 + src += PAX_USER_SHADOW_BASE;
11490 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11491 + dst += PAX_USER_SHADOW_BASE;
11492 +#endif
11493 +
11494 + return copy_user_generic((__force_kernel void *)dst,
11495 + (__force_kernel const void *)src, size);
11496 }
11497 }
11498
11499 @@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11500 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11501
11502 static __must_check __always_inline int
11503 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11504 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11505 {
11506 - return copy_user_generic(dst, (__force const void *)src, size);
11507 + if (size > INT_MAX)
11508 + return size;
11509 +
11510 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11511 + if (!__access_ok(VERIFY_READ, src, size))
11512 + return size;
11513 +
11514 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11515 + src += PAX_USER_SHADOW_BASE;
11516 +#endif
11517 +
11518 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11519 }
11520
11521 -static __must_check __always_inline int
11522 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11523 +static __must_check __always_inline unsigned long
11524 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11525 {
11526 - return copy_user_generic((__force void *)dst, src, size);
11527 + if (size > INT_MAX)
11528 + return size;
11529 +
11530 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11531 + if (!__access_ok(VERIFY_WRITE, dst, size))
11532 + return size;
11533 +
11534 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11535 + dst += PAX_USER_SHADOW_BASE;
11536 +#endif
11537 +
11538 + return copy_user_generic((__force_kernel void *)dst, src, size);
11539 }
11540
11541 -extern long __copy_user_nocache(void *dst, const void __user *src,
11542 - unsigned size, int zerorest);
11543 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11544 + unsigned long size, int zerorest);
11545
11546 -static inline int
11547 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11548 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11549 {
11550 might_sleep();
11551 +
11552 + if (size > INT_MAX)
11553 + return size;
11554 +
11555 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11556 + if (!__access_ok(VERIFY_READ, src, size))
11557 + return size;
11558 +#endif
11559 +
11560 return __copy_user_nocache(dst, src, size, 1);
11561 }
11562
11563 -static inline int
11564 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11565 - unsigned size)
11566 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11567 + unsigned long size)
11568 {
11569 + if (size > INT_MAX)
11570 + return size;
11571 +
11572 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11573 + if (!__access_ok(VERIFY_READ, src, size))
11574 + return size;
11575 +#endif
11576 +
11577 return __copy_user_nocache(dst, src, size, 0);
11578 }
11579
11580 -unsigned long
11581 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11582 +extern unsigned long
11583 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11584
11585 #endif /* _ASM_X86_UACCESS_64_H */
11586 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11587 index bb05228..d763d5b 100644
11588 --- a/arch/x86/include/asm/vdso.h
11589 +++ b/arch/x86/include/asm/vdso.h
11590 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11591 #define VDSO32_SYMBOL(base, name) \
11592 ({ \
11593 extern const char VDSO32_##name[]; \
11594 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11595 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11596 })
11597 #endif
11598
11599 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11600 index 1971e65..1e3559b 100644
11601 --- a/arch/x86/include/asm/x86_init.h
11602 +++ b/arch/x86/include/asm/x86_init.h
11603 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11604 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11605 void (*find_smp_config)(void);
11606 void (*get_smp_config)(unsigned int early);
11607 -};
11608 +} __no_const;
11609
11610 /**
11611 * struct x86_init_resources - platform specific resource related ops
11612 @@ -42,7 +42,7 @@ struct x86_init_resources {
11613 void (*probe_roms)(void);
11614 void (*reserve_resources)(void);
11615 char *(*memory_setup)(void);
11616 -};
11617 +} __no_const;
11618
11619 /**
11620 * struct x86_init_irqs - platform specific interrupt setup
11621 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11622 void (*pre_vector_init)(void);
11623 void (*intr_init)(void);
11624 void (*trap_init)(void);
11625 -};
11626 +} __no_const;
11627
11628 /**
11629 * struct x86_init_oem - oem platform specific customizing functions
11630 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11631 struct x86_init_oem {
11632 void (*arch_setup)(void);
11633 void (*banner)(void);
11634 -};
11635 +} __no_const;
11636
11637 /**
11638 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11639 @@ -76,7 +76,7 @@ struct x86_init_oem {
11640 */
11641 struct x86_init_mapping {
11642 void (*pagetable_reserve)(u64 start, u64 end);
11643 -};
11644 +} __no_const;
11645
11646 /**
11647 * struct x86_init_paging - platform specific paging functions
11648 @@ -86,7 +86,7 @@ struct x86_init_mapping {
11649 struct x86_init_paging {
11650 void (*pagetable_setup_start)(pgd_t *base);
11651 void (*pagetable_setup_done)(pgd_t *base);
11652 -};
11653 +} __no_const;
11654
11655 /**
11656 * struct x86_init_timers - platform specific timer setup
11657 @@ -101,7 +101,7 @@ struct x86_init_timers {
11658 void (*tsc_pre_init)(void);
11659 void (*timer_init)(void);
11660 void (*wallclock_init)(void);
11661 -};
11662 +} __no_const;
11663
11664 /**
11665 * struct x86_init_iommu - platform specific iommu setup
11666 @@ -109,7 +109,7 @@ struct x86_init_timers {
11667 */
11668 struct x86_init_iommu {
11669 int (*iommu_init)(void);
11670 -};
11671 +} __no_const;
11672
11673 /**
11674 * struct x86_init_pci - platform specific pci init functions
11675 @@ -123,7 +123,7 @@ struct x86_init_pci {
11676 int (*init)(void);
11677 void (*init_irq)(void);
11678 void (*fixup_irqs)(void);
11679 -};
11680 +} __no_const;
11681
11682 /**
11683 * struct x86_init_ops - functions for platform specific setup
11684 @@ -139,7 +139,7 @@ struct x86_init_ops {
11685 struct x86_init_timers timers;
11686 struct x86_init_iommu iommu;
11687 struct x86_init_pci pci;
11688 -};
11689 +} __no_const;
11690
11691 /**
11692 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11693 @@ -147,7 +147,7 @@ struct x86_init_ops {
11694 */
11695 struct x86_cpuinit_ops {
11696 void (*setup_percpu_clockev)(void);
11697 -};
11698 +} __no_const;
11699
11700 /**
11701 * struct x86_platform_ops - platform specific runtime functions
11702 @@ -169,7 +169,7 @@ struct x86_platform_ops {
11703 void (*nmi_init)(void);
11704 unsigned char (*get_nmi_reason)(void);
11705 int (*i8042_detect)(void);
11706 -};
11707 +} __no_const;
11708
11709 struct pci_dev;
11710
11711 @@ -177,7 +177,7 @@ struct x86_msi_ops {
11712 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11713 void (*teardown_msi_irq)(unsigned int irq);
11714 void (*teardown_msi_irqs)(struct pci_dev *dev);
11715 -};
11716 +} __no_const;
11717
11718 extern struct x86_init_ops x86_init;
11719 extern struct x86_cpuinit_ops x86_cpuinit;
11720 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11721 index c6ce245..ffbdab7 100644
11722 --- a/arch/x86/include/asm/xsave.h
11723 +++ b/arch/x86/include/asm/xsave.h
11724 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11725 {
11726 int err;
11727
11728 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11729 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11730 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11731 +#endif
11732 +
11733 /*
11734 * Clear the xsave header first, so that reserved fields are
11735 * initialized to zero.
11736 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11737 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11738 {
11739 int err;
11740 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11741 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11742 u32 lmask = mask;
11743 u32 hmask = mask >> 32;
11744
11745 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11746 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11747 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11748 +#endif
11749 +
11750 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11751 "2:\n"
11752 ".section .fixup,\"ax\"\n"
11753 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11754 index 6a564ac..9b1340c 100644
11755 --- a/arch/x86/kernel/acpi/realmode/Makefile
11756 +++ b/arch/x86/kernel/acpi/realmode/Makefile
11757 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11758 $(call cc-option, -fno-stack-protector) \
11759 $(call cc-option, -mpreferred-stack-boundary=2)
11760 KBUILD_CFLAGS += $(call cc-option, -m32)
11761 +ifdef CONSTIFY_PLUGIN
11762 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11763 +endif
11764 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11765 GCOV_PROFILE := n
11766
11767 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11768 index b4fd836..4358fe3 100644
11769 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
11770 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11771 @@ -108,6 +108,9 @@ wakeup_code:
11772 /* Do any other stuff... */
11773
11774 #ifndef CONFIG_64BIT
11775 + /* Recheck NX bit overrides (64bit path does this in trampoline */
11776 + call verify_cpu
11777 +
11778 /* This could also be done in C code... */
11779 movl pmode_cr3, %eax
11780 movl %eax, %cr3
11781 @@ -131,6 +134,7 @@ wakeup_code:
11782 movl pmode_cr0, %eax
11783 movl %eax, %cr0
11784 jmp pmode_return
11785 +# include "../../verify_cpu.S"
11786 #else
11787 pushw $0
11788 pushw trampoline_segment
11789 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11790 index 103b6ab..2004d0a 100644
11791 --- a/arch/x86/kernel/acpi/sleep.c
11792 +++ b/arch/x86/kernel/acpi/sleep.c
11793 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11794 header->trampoline_segment = trampoline_address() >> 4;
11795 #ifdef CONFIG_SMP
11796 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11797 +
11798 + pax_open_kernel();
11799 early_gdt_descr.address =
11800 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11801 + pax_close_kernel();
11802 +
11803 initial_gs = per_cpu_offset(smp_processor_id());
11804 #endif
11805 initial_code = (unsigned long)wakeup_long64;
11806 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11807 index 13ab720..95d5442 100644
11808 --- a/arch/x86/kernel/acpi/wakeup_32.S
11809 +++ b/arch/x86/kernel/acpi/wakeup_32.S
11810 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11811 # and restore the stack ... but you need gdt for this to work
11812 movl saved_context_esp, %esp
11813
11814 - movl %cs:saved_magic, %eax
11815 - cmpl $0x12345678, %eax
11816 + cmpl $0x12345678, saved_magic
11817 jne bogus_magic
11818
11819 # jump to place where we left off
11820 - movl saved_eip, %eax
11821 - jmp *%eax
11822 + jmp *(saved_eip)
11823
11824 bogus_magic:
11825 jmp bogus_magic
11826 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11827 index 1f84794..e23f862 100644
11828 --- a/arch/x86/kernel/alternative.c
11829 +++ b/arch/x86/kernel/alternative.c
11830 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11831 */
11832 for (a = start; a < end; a++) {
11833 instr = (u8 *)&a->instr_offset + a->instr_offset;
11834 +
11835 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11836 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11837 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11838 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11839 +#endif
11840 +
11841 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11842 BUG_ON(a->replacementlen > a->instrlen);
11843 BUG_ON(a->instrlen > sizeof(insnbuf));
11844 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11845 for (poff = start; poff < end; poff++) {
11846 u8 *ptr = (u8 *)poff + *poff;
11847
11848 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11849 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11850 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11851 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11852 +#endif
11853 +
11854 if (!*poff || ptr < text || ptr >= text_end)
11855 continue;
11856 /* turn DS segment override prefix into lock prefix */
11857 - if (*ptr == 0x3e)
11858 + if (*ktla_ktva(ptr) == 0x3e)
11859 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11860 };
11861 mutex_unlock(&text_mutex);
11862 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11863 for (poff = start; poff < end; poff++) {
11864 u8 *ptr = (u8 *)poff + *poff;
11865
11866 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11867 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11868 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11869 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11870 +#endif
11871 +
11872 if (!*poff || ptr < text || ptr >= text_end)
11873 continue;
11874 /* turn lock prefix into DS segment override prefix */
11875 - if (*ptr == 0xf0)
11876 + if (*ktla_ktva(ptr) == 0xf0)
11877 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11878 };
11879 mutex_unlock(&text_mutex);
11880 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11881
11882 BUG_ON(p->len > MAX_PATCH_LEN);
11883 /* prep the buffer with the original instructions */
11884 - memcpy(insnbuf, p->instr, p->len);
11885 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11886 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11887 (unsigned long)p->instr, p->len);
11888
11889 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11890 if (smp_alt_once)
11891 free_init_pages("SMP alternatives",
11892 (unsigned long)__smp_locks,
11893 - (unsigned long)__smp_locks_end);
11894 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11895
11896 restart_nmi();
11897 }
11898 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11899 * instructions. And on the local CPU you need to be protected again NMI or MCE
11900 * handlers seeing an inconsistent instruction while you patch.
11901 */
11902 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
11903 +void *__kprobes text_poke_early(void *addr, const void *opcode,
11904 size_t len)
11905 {
11906 unsigned long flags;
11907 local_irq_save(flags);
11908 - memcpy(addr, opcode, len);
11909 +
11910 + pax_open_kernel();
11911 + memcpy(ktla_ktva(addr), opcode, len);
11912 sync_core();
11913 + pax_close_kernel();
11914 +
11915 local_irq_restore(flags);
11916 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11917 that causes hangs on some VIA CPUs. */
11918 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11919 */
11920 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11921 {
11922 - unsigned long flags;
11923 - char *vaddr;
11924 + unsigned char *vaddr = ktla_ktva(addr);
11925 struct page *pages[2];
11926 - int i;
11927 + size_t i;
11928
11929 if (!core_kernel_text((unsigned long)addr)) {
11930 - pages[0] = vmalloc_to_page(addr);
11931 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11932 + pages[0] = vmalloc_to_page(vaddr);
11933 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11934 } else {
11935 - pages[0] = virt_to_page(addr);
11936 + pages[0] = virt_to_page(vaddr);
11937 WARN_ON(!PageReserved(pages[0]));
11938 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11939 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11940 }
11941 BUG_ON(!pages[0]);
11942 - local_irq_save(flags);
11943 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11944 - if (pages[1])
11945 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11946 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11947 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11948 - clear_fixmap(FIX_TEXT_POKE0);
11949 - if (pages[1])
11950 - clear_fixmap(FIX_TEXT_POKE1);
11951 - local_flush_tlb();
11952 - sync_core();
11953 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11954 - that causes hangs on some VIA CPUs. */
11955 + text_poke_early(addr, opcode, len);
11956 for (i = 0; i < len; i++)
11957 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11958 - local_irq_restore(flags);
11959 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11960 return addr;
11961 }
11962
11963 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11964 index f98d84c..e402a69 100644
11965 --- a/arch/x86/kernel/apic/apic.c
11966 +++ b/arch/x86/kernel/apic/apic.c
11967 @@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11968 /*
11969 * Debug level, exported for io_apic.c
11970 */
11971 -unsigned int apic_verbosity;
11972 +int apic_verbosity;
11973
11974 int pic_mode;
11975
11976 @@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11977 apic_write(APIC_ESR, 0);
11978 v1 = apic_read(APIC_ESR);
11979 ack_APIC_irq();
11980 - atomic_inc(&irq_err_count);
11981 + atomic_inc_unchecked(&irq_err_count);
11982
11983 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11984 smp_processor_id(), v0 , v1);
11985 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11986 index 6d939d7..0697fcc 100644
11987 --- a/arch/x86/kernel/apic/io_apic.c
11988 +++ b/arch/x86/kernel/apic/io_apic.c
11989 @@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11990 }
11991 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11992
11993 -void lock_vector_lock(void)
11994 +void lock_vector_lock(void) __acquires(vector_lock)
11995 {
11996 /* Used to the online set of cpus does not change
11997 * during assign_irq_vector.
11998 @@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
11999 raw_spin_lock(&vector_lock);
12000 }
12001
12002 -void unlock_vector_lock(void)
12003 +void unlock_vector_lock(void) __releases(vector_lock)
12004 {
12005 raw_spin_unlock(&vector_lock);
12006 }
12007 @@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
12008 ack_APIC_irq();
12009 }
12010
12011 -atomic_t irq_mis_count;
12012 +atomic_unchecked_t irq_mis_count;
12013
12014 static void ack_apic_level(struct irq_data *data)
12015 {
12016 @@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
12017 * at the cpu.
12018 */
12019 if (!(v & (1 << (i & 0x1f)))) {
12020 - atomic_inc(&irq_mis_count);
12021 + atomic_inc_unchecked(&irq_mis_count);
12022
12023 eoi_ioapic_irq(irq, cfg);
12024 }
12025 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
12026 index a46bd38..6b906d7 100644
12027 --- a/arch/x86/kernel/apm_32.c
12028 +++ b/arch/x86/kernel/apm_32.c
12029 @@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
12030 * This is for buggy BIOS's that refer to (real mode) segment 0x40
12031 * even though they are called in protected mode.
12032 */
12033 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
12034 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
12035 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
12036
12037 static const char driver_version[] = "1.16ac"; /* no spaces */
12038 @@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
12039 BUG_ON(cpu != 0);
12040 gdt = get_cpu_gdt_table(cpu);
12041 save_desc_40 = gdt[0x40 / 8];
12042 +
12043 + pax_open_kernel();
12044 gdt[0x40 / 8] = bad_bios_desc;
12045 + pax_close_kernel();
12046
12047 apm_irq_save(flags);
12048 APM_DO_SAVE_SEGS;
12049 @@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
12050 &call->esi);
12051 APM_DO_RESTORE_SEGS;
12052 apm_irq_restore(flags);
12053 +
12054 + pax_open_kernel();
12055 gdt[0x40 / 8] = save_desc_40;
12056 + pax_close_kernel();
12057 +
12058 put_cpu();
12059
12060 return call->eax & 0xff;
12061 @@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
12062 BUG_ON(cpu != 0);
12063 gdt = get_cpu_gdt_table(cpu);
12064 save_desc_40 = gdt[0x40 / 8];
12065 +
12066 + pax_open_kernel();
12067 gdt[0x40 / 8] = bad_bios_desc;
12068 + pax_close_kernel();
12069
12070 apm_irq_save(flags);
12071 APM_DO_SAVE_SEGS;
12072 @@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
12073 &call->eax);
12074 APM_DO_RESTORE_SEGS;
12075 apm_irq_restore(flags);
12076 +
12077 + pax_open_kernel();
12078 gdt[0x40 / 8] = save_desc_40;
12079 + pax_close_kernel();
12080 +
12081 put_cpu();
12082 return error;
12083 }
12084 @@ -2347,12 +2361,15 @@ static int __init apm_init(void)
12085 * code to that CPU.
12086 */
12087 gdt = get_cpu_gdt_table(0);
12088 +
12089 + pax_open_kernel();
12090 set_desc_base(&gdt[APM_CS >> 3],
12091 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
12092 set_desc_base(&gdt[APM_CS_16 >> 3],
12093 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
12094 set_desc_base(&gdt[APM_DS >> 3],
12095 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
12096 + pax_close_kernel();
12097
12098 proc_create("apm", 0, NULL, &apm_file_ops);
12099
12100 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
12101 index 4f13faf..87db5d2 100644
12102 --- a/arch/x86/kernel/asm-offsets.c
12103 +++ b/arch/x86/kernel/asm-offsets.c
12104 @@ -33,6 +33,8 @@ void common(void) {
12105 OFFSET(TI_status, thread_info, status);
12106 OFFSET(TI_addr_limit, thread_info, addr_limit);
12107 OFFSET(TI_preempt_count, thread_info, preempt_count);
12108 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
12109 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12110
12111 BLANK();
12112 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
12113 @@ -53,8 +55,26 @@ void common(void) {
12114 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12115 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12116 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12117 +
12118 +#ifdef CONFIG_PAX_KERNEXEC
12119 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12120 #endif
12121
12122 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12123 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12124 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12125 +#ifdef CONFIG_X86_64
12126 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
12127 +#endif
12128 +#endif
12129 +
12130 +#endif
12131 +
12132 + BLANK();
12133 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12134 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
12135 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12136 +
12137 #ifdef CONFIG_XEN
12138 BLANK();
12139 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12140 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
12141 index e72a119..6e2955d 100644
12142 --- a/arch/x86/kernel/asm-offsets_64.c
12143 +++ b/arch/x86/kernel/asm-offsets_64.c
12144 @@ -69,6 +69,7 @@ int main(void)
12145 BLANK();
12146 #undef ENTRY
12147
12148 + DEFINE(TSS_size, sizeof(struct tss_struct));
12149 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
12150 BLANK();
12151
12152 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
12153 index 25f24dc..4094a7f 100644
12154 --- a/arch/x86/kernel/cpu/Makefile
12155 +++ b/arch/x86/kernel/cpu/Makefile
12156 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
12157 CFLAGS_REMOVE_perf_event.o = -pg
12158 endif
12159
12160 -# Make sure load_percpu_segment has no stackprotector
12161 -nostackp := $(call cc-option, -fno-stack-protector)
12162 -CFLAGS_common.o := $(nostackp)
12163 -
12164 obj-y := intel_cacheinfo.o scattered.o topology.o
12165 obj-y += proc.o capflags.o powerflags.o common.o
12166 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
12167 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
12168 index 0bab2b1..d0a1bf8 100644
12169 --- a/arch/x86/kernel/cpu/amd.c
12170 +++ b/arch/x86/kernel/cpu/amd.c
12171 @@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
12172 unsigned int size)
12173 {
12174 /* AMD errata T13 (order #21922) */
12175 - if ((c->x86 == 6)) {
12176 + if (c->x86 == 6) {
12177 /* Duron Rev A0 */
12178 if (c->x86_model == 3 && c->x86_mask == 0)
12179 size = 64;
12180 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
12181 index aa003b1..47ea638 100644
12182 --- a/arch/x86/kernel/cpu/common.c
12183 +++ b/arch/x86/kernel/cpu/common.c
12184 @@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
12185
12186 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12187
12188 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12189 -#ifdef CONFIG_X86_64
12190 - /*
12191 - * We need valid kernel segments for data and code in long mode too
12192 - * IRET will check the segment types kkeil 2000/10/28
12193 - * Also sysret mandates a special GDT layout
12194 - *
12195 - * TLS descriptors are currently at a different place compared to i386.
12196 - * Hopefully nobody expects them at a fixed place (Wine?)
12197 - */
12198 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12199 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12200 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12201 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12202 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12203 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12204 -#else
12205 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12206 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12207 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12208 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12209 - /*
12210 - * Segments used for calling PnP BIOS have byte granularity.
12211 - * They code segments and data segments have fixed 64k limits,
12212 - * the transfer segment sizes are set at run time.
12213 - */
12214 - /* 32-bit code */
12215 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12216 - /* 16-bit code */
12217 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12218 - /* 16-bit data */
12219 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12220 - /* 16-bit data */
12221 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12222 - /* 16-bit data */
12223 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12224 - /*
12225 - * The APM segments have byte granularity and their bases
12226 - * are set at run time. All have 64k limits.
12227 - */
12228 - /* 32-bit code */
12229 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12230 - /* 16-bit code */
12231 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12232 - /* data */
12233 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12234 -
12235 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12236 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12237 - GDT_STACK_CANARY_INIT
12238 -#endif
12239 -} };
12240 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12241 -
12242 static int __init x86_xsave_setup(char *s)
12243 {
12244 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12245 @@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
12246 {
12247 struct desc_ptr gdt_descr;
12248
12249 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12250 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12251 gdt_descr.size = GDT_SIZE - 1;
12252 load_gdt(&gdt_descr);
12253 /* Reload the per-cpu base */
12254 @@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
12255 /* Filter out anything that depends on CPUID levels we don't have */
12256 filter_cpuid_features(c, true);
12257
12258 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
12259 + setup_clear_cpu_cap(X86_FEATURE_SEP);
12260 +#endif
12261 +
12262 /* If the model name is still unset, do table lookup. */
12263 if (!c->x86_model_id[0]) {
12264 const char *p;
12265 @@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
12266 }
12267 __setup("clearcpuid=", setup_disablecpuid);
12268
12269 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12270 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
12271 +
12272 #ifdef CONFIG_X86_64
12273 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12274
12275 @@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
12276 EXPORT_PER_CPU_SYMBOL(current_task);
12277
12278 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12279 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12280 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12281 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12282
12283 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12284 @@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
12285 {
12286 memset(regs, 0, sizeof(struct pt_regs));
12287 regs->fs = __KERNEL_PERCPU;
12288 - regs->gs = __KERNEL_STACK_CANARY;
12289 + savesegment(gs, regs->gs);
12290
12291 return regs;
12292 }
12293 @@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
12294 int i;
12295
12296 cpu = stack_smp_processor_id();
12297 - t = &per_cpu(init_tss, cpu);
12298 + t = init_tss + cpu;
12299 oist = &per_cpu(orig_ist, cpu);
12300
12301 #ifdef CONFIG_NUMA
12302 @@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
12303 switch_to_new_gdt(cpu);
12304 loadsegment(fs, 0);
12305
12306 - load_idt((const struct desc_ptr *)&idt_descr);
12307 + load_idt(&idt_descr);
12308
12309 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12310 syscall_init();
12311 @@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
12312 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12313 barrier();
12314
12315 - x86_configure_nx();
12316 if (cpu != 0)
12317 enable_x2apic();
12318
12319 @@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
12320 {
12321 int cpu = smp_processor_id();
12322 struct task_struct *curr = current;
12323 - struct tss_struct *t = &per_cpu(init_tss, cpu);
12324 + struct tss_struct *t = init_tss + cpu;
12325 struct thread_struct *thread = &curr->thread;
12326
12327 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12328 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12329 index 5231312..a78a987 100644
12330 --- a/arch/x86/kernel/cpu/intel.c
12331 +++ b/arch/x86/kernel/cpu/intel.c
12332 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12333 * Update the IDT descriptor and reload the IDT so that
12334 * it uses the read-only mapped virtual address.
12335 */
12336 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12337 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12338 load_idt(&idt_descr);
12339 }
12340 #endif
12341 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12342 index 2af127d..8ff7ac0 100644
12343 --- a/arch/x86/kernel/cpu/mcheck/mce.c
12344 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
12345 @@ -42,6 +42,7 @@
12346 #include <asm/processor.h>
12347 #include <asm/mce.h>
12348 #include <asm/msr.h>
12349 +#include <asm/local.h>
12350
12351 #include "mce-internal.h"
12352
12353 @@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
12354 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12355 m->cs, m->ip);
12356
12357 - if (m->cs == __KERNEL_CS)
12358 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12359 print_symbol("{%s}", m->ip);
12360 pr_cont("\n");
12361 }
12362 @@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
12363
12364 #define PANIC_TIMEOUT 5 /* 5 seconds */
12365
12366 -static atomic_t mce_paniced;
12367 +static atomic_unchecked_t mce_paniced;
12368
12369 static int fake_panic;
12370 -static atomic_t mce_fake_paniced;
12371 +static atomic_unchecked_t mce_fake_paniced;
12372
12373 /* Panic in progress. Enable interrupts and wait for final IPI */
12374 static void wait_for_panic(void)
12375 @@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12376 /*
12377 * Make sure only one CPU runs in machine check panic
12378 */
12379 - if (atomic_inc_return(&mce_paniced) > 1)
12380 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12381 wait_for_panic();
12382 barrier();
12383
12384 @@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12385 console_verbose();
12386 } else {
12387 /* Don't log too much for fake panic */
12388 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12389 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12390 return;
12391 }
12392 /* First print corrected ones that are still unlogged */
12393 @@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12394 * might have been modified by someone else.
12395 */
12396 rmb();
12397 - if (atomic_read(&mce_paniced))
12398 + if (atomic_read_unchecked(&mce_paniced))
12399 wait_for_panic();
12400 if (!monarch_timeout)
12401 goto out;
12402 @@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12403 }
12404
12405 /* Call the installed machine check handler for this CPU setup. */
12406 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
12407 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12408 unexpected_machine_check;
12409
12410 /*
12411 @@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12412 return;
12413 }
12414
12415 + pax_open_kernel();
12416 machine_check_vector = do_machine_check;
12417 + pax_close_kernel();
12418
12419 __mcheck_cpu_init_generic();
12420 __mcheck_cpu_init_vendor(c);
12421 @@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12422 */
12423
12424 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12425 -static int mce_chrdev_open_count; /* #times opened */
12426 +static local_t mce_chrdev_open_count; /* #times opened */
12427 static int mce_chrdev_open_exclu; /* already open exclusive? */
12428
12429 static int mce_chrdev_open(struct inode *inode, struct file *file)
12430 @@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12431 spin_lock(&mce_chrdev_state_lock);
12432
12433 if (mce_chrdev_open_exclu ||
12434 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12435 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12436 spin_unlock(&mce_chrdev_state_lock);
12437
12438 return -EBUSY;
12439 @@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12440
12441 if (file->f_flags & O_EXCL)
12442 mce_chrdev_open_exclu = 1;
12443 - mce_chrdev_open_count++;
12444 + local_inc(&mce_chrdev_open_count);
12445
12446 spin_unlock(&mce_chrdev_state_lock);
12447
12448 @@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12449 {
12450 spin_lock(&mce_chrdev_state_lock);
12451
12452 - mce_chrdev_open_count--;
12453 + local_dec(&mce_chrdev_open_count);
12454 mce_chrdev_open_exclu = 0;
12455
12456 spin_unlock(&mce_chrdev_state_lock);
12457 @@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
12458 static void mce_reset(void)
12459 {
12460 cpu_missing = 0;
12461 - atomic_set(&mce_fake_paniced, 0);
12462 + atomic_set_unchecked(&mce_fake_paniced, 0);
12463 atomic_set(&mce_executing, 0);
12464 atomic_set(&mce_callin, 0);
12465 atomic_set(&global_nwo, 0);
12466 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12467 index 5c0e653..0882b0a 100644
12468 --- a/arch/x86/kernel/cpu/mcheck/p5.c
12469 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
12470 @@ -12,6 +12,7 @@
12471 #include <asm/system.h>
12472 #include <asm/mce.h>
12473 #include <asm/msr.h>
12474 +#include <asm/pgtable.h>
12475
12476 /* By default disabled */
12477 int mce_p5_enabled __read_mostly;
12478 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12479 if (!cpu_has(c, X86_FEATURE_MCE))
12480 return;
12481
12482 + pax_open_kernel();
12483 machine_check_vector = pentium_machine_check;
12484 + pax_close_kernel();
12485 /* Make sure the vector pointer is visible before we enable MCEs: */
12486 wmb();
12487
12488 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12489 index 54060f5..c1a7577 100644
12490 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
12491 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12492 @@ -11,6 +11,7 @@
12493 #include <asm/system.h>
12494 #include <asm/mce.h>
12495 #include <asm/msr.h>
12496 +#include <asm/pgtable.h>
12497
12498 /* Machine check handler for WinChip C6: */
12499 static void winchip_machine_check(struct pt_regs *regs, long error_code)
12500 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12501 {
12502 u32 lo, hi;
12503
12504 + pax_open_kernel();
12505 machine_check_vector = winchip_machine_check;
12506 + pax_close_kernel();
12507 /* Make sure the vector pointer is visible before we enable MCEs: */
12508 wmb();
12509
12510 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12511 index 6b96110..0da73eb 100644
12512 --- a/arch/x86/kernel/cpu/mtrr/main.c
12513 +++ b/arch/x86/kernel/cpu/mtrr/main.c
12514 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12515 u64 size_or_mask, size_and_mask;
12516 static bool mtrr_aps_delayed_init;
12517
12518 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12519 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12520
12521 const struct mtrr_ops *mtrr_if;
12522
12523 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12524 index df5e41f..816c719 100644
12525 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12526 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12527 @@ -25,7 +25,7 @@ struct mtrr_ops {
12528 int (*validate_add_page)(unsigned long base, unsigned long size,
12529 unsigned int type);
12530 int (*have_wrcomb)(void);
12531 -};
12532 +} __do_const;
12533
12534 extern int generic_get_free_region(unsigned long base, unsigned long size,
12535 int replace_reg);
12536 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12537 index 2bda212..78cc605 100644
12538 --- a/arch/x86/kernel/cpu/perf_event.c
12539 +++ b/arch/x86/kernel/cpu/perf_event.c
12540 @@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12541 break;
12542
12543 perf_callchain_store(entry, frame.return_address);
12544 - fp = frame.next_frame;
12545 + fp = (const void __force_user *)frame.next_frame;
12546 }
12547 }
12548
12549 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12550 index 13ad899..f642b9a 100644
12551 --- a/arch/x86/kernel/crash.c
12552 +++ b/arch/x86/kernel/crash.c
12553 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
12554 {
12555 #ifdef CONFIG_X86_32
12556 struct pt_regs fixed_regs;
12557 -#endif
12558
12559 -#ifdef CONFIG_X86_32
12560 - if (!user_mode_vm(regs)) {
12561 + if (!user_mode(regs)) {
12562 crash_fixup_ss_esp(&fixed_regs, regs);
12563 regs = &fixed_regs;
12564 }
12565 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12566 index 37250fe..bf2ec74 100644
12567 --- a/arch/x86/kernel/doublefault_32.c
12568 +++ b/arch/x86/kernel/doublefault_32.c
12569 @@ -11,7 +11,7 @@
12570
12571 #define DOUBLEFAULT_STACKSIZE (1024)
12572 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12573 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12574 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12575
12576 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12577
12578 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12579 unsigned long gdt, tss;
12580
12581 store_gdt(&gdt_desc);
12582 - gdt = gdt_desc.address;
12583 + gdt = (unsigned long)gdt_desc.address;
12584
12585 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12586
12587 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12588 /* 0x2 bit is always set */
12589 .flags = X86_EFLAGS_SF | 0x2,
12590 .sp = STACK_START,
12591 - .es = __USER_DS,
12592 + .es = __KERNEL_DS,
12593 .cs = __KERNEL_CS,
12594 .ss = __KERNEL_DS,
12595 - .ds = __USER_DS,
12596 + .ds = __KERNEL_DS,
12597 .fs = __KERNEL_PERCPU,
12598
12599 .__cr3 = __pa_nodebug(swapper_pg_dir),
12600 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12601 index 1aae78f..aab3a3d 100644
12602 --- a/arch/x86/kernel/dumpstack.c
12603 +++ b/arch/x86/kernel/dumpstack.c
12604 @@ -2,6 +2,9 @@
12605 * Copyright (C) 1991, 1992 Linus Torvalds
12606 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12607 */
12608 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12609 +#define __INCLUDED_BY_HIDESYM 1
12610 +#endif
12611 #include <linux/kallsyms.h>
12612 #include <linux/kprobes.h>
12613 #include <linux/uaccess.h>
12614 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12615 static void
12616 print_ftrace_graph_addr(unsigned long addr, void *data,
12617 const struct stacktrace_ops *ops,
12618 - struct thread_info *tinfo, int *graph)
12619 + struct task_struct *task, int *graph)
12620 {
12621 - struct task_struct *task = tinfo->task;
12622 unsigned long ret_addr;
12623 int index = task->curr_ret_stack;
12624
12625 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12626 static inline void
12627 print_ftrace_graph_addr(unsigned long addr, void *data,
12628 const struct stacktrace_ops *ops,
12629 - struct thread_info *tinfo, int *graph)
12630 + struct task_struct *task, int *graph)
12631 { }
12632 #endif
12633
12634 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12635 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12636 */
12637
12638 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12639 - void *p, unsigned int size, void *end)
12640 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12641 {
12642 - void *t = tinfo;
12643 if (end) {
12644 if (p < end && p >= (end-THREAD_SIZE))
12645 return 1;
12646 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12647 }
12648
12649 unsigned long
12650 -print_context_stack(struct thread_info *tinfo,
12651 +print_context_stack(struct task_struct *task, void *stack_start,
12652 unsigned long *stack, unsigned long bp,
12653 const struct stacktrace_ops *ops, void *data,
12654 unsigned long *end, int *graph)
12655 {
12656 struct stack_frame *frame = (struct stack_frame *)bp;
12657
12658 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12659 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12660 unsigned long addr;
12661
12662 addr = *stack;
12663 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12664 } else {
12665 ops->address(data, addr, 0);
12666 }
12667 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12668 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12669 }
12670 stack++;
12671 }
12672 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12673 EXPORT_SYMBOL_GPL(print_context_stack);
12674
12675 unsigned long
12676 -print_context_stack_bp(struct thread_info *tinfo,
12677 +print_context_stack_bp(struct task_struct *task, void *stack_start,
12678 unsigned long *stack, unsigned long bp,
12679 const struct stacktrace_ops *ops, void *data,
12680 unsigned long *end, int *graph)
12681 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12682 struct stack_frame *frame = (struct stack_frame *)bp;
12683 unsigned long *ret_addr = &frame->return_address;
12684
12685 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12686 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12687 unsigned long addr = *ret_addr;
12688
12689 if (!__kernel_text_address(addr))
12690 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12691 ops->address(data, addr, 1);
12692 frame = frame->next_frame;
12693 ret_addr = &frame->return_address;
12694 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12695 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12696 }
12697
12698 return (unsigned long)frame;
12699 @@ -186,7 +186,7 @@ void dump_stack(void)
12700
12701 bp = stack_frame(current, NULL);
12702 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12703 - current->pid, current->comm, print_tainted(),
12704 + task_pid_nr(current), current->comm, print_tainted(),
12705 init_utsname()->release,
12706 (int)strcspn(init_utsname()->version, " "),
12707 init_utsname()->version);
12708 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12709 }
12710 EXPORT_SYMBOL_GPL(oops_begin);
12711
12712 +extern void gr_handle_kernel_exploit(void);
12713 +
12714 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12715 {
12716 if (regs && kexec_should_crash(current))
12717 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12718 panic("Fatal exception in interrupt");
12719 if (panic_on_oops)
12720 panic("Fatal exception");
12721 - do_exit(signr);
12722 +
12723 + gr_handle_kernel_exploit();
12724 +
12725 + do_group_exit(signr);
12726 }
12727
12728 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12729 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12730
12731 show_registers(regs);
12732 #ifdef CONFIG_X86_32
12733 - if (user_mode_vm(regs)) {
12734 + if (user_mode(regs)) {
12735 sp = regs->sp;
12736 ss = regs->ss & 0xffff;
12737 } else {
12738 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12739 unsigned long flags = oops_begin();
12740 int sig = SIGSEGV;
12741
12742 - if (!user_mode_vm(regs))
12743 + if (!user_mode(regs))
12744 report_bug(regs->ip, regs);
12745
12746 if (__die(str, regs, err))
12747 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12748 index c99f9ed..2a15d80 100644
12749 --- a/arch/x86/kernel/dumpstack_32.c
12750 +++ b/arch/x86/kernel/dumpstack_32.c
12751 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12752 bp = stack_frame(task, regs);
12753
12754 for (;;) {
12755 - struct thread_info *context;
12756 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12757
12758 - context = (struct thread_info *)
12759 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12760 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12761 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12762
12763 - stack = (unsigned long *)context->previous_esp;
12764 - if (!stack)
12765 + if (stack_start == task_stack_page(task))
12766 break;
12767 + stack = *(unsigned long **)stack_start;
12768 if (ops->stack(data, "IRQ") < 0)
12769 break;
12770 touch_nmi_watchdog();
12771 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12772 * When in-kernel, we also print out the stack and code at the
12773 * time of the fault..
12774 */
12775 - if (!user_mode_vm(regs)) {
12776 + if (!user_mode(regs)) {
12777 unsigned int code_prologue = code_bytes * 43 / 64;
12778 unsigned int code_len = code_bytes;
12779 unsigned char c;
12780 u8 *ip;
12781 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12782
12783 printk(KERN_EMERG "Stack:\n");
12784 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12785
12786 printk(KERN_EMERG "Code: ");
12787
12788 - ip = (u8 *)regs->ip - code_prologue;
12789 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12790 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12791 /* try starting at IP */
12792 - ip = (u8 *)regs->ip;
12793 + ip = (u8 *)regs->ip + cs_base;
12794 code_len = code_len - code_prologue + 1;
12795 }
12796 for (i = 0; i < code_len; i++, ip++) {
12797 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12798 printk(KERN_CONT " Bad EIP value.");
12799 break;
12800 }
12801 - if (ip == (u8 *)regs->ip)
12802 + if (ip == (u8 *)regs->ip + cs_base)
12803 printk(KERN_CONT "<%02x> ", c);
12804 else
12805 printk(KERN_CONT "%02x ", c);
12806 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12807 {
12808 unsigned short ud2;
12809
12810 + ip = ktla_ktva(ip);
12811 if (ip < PAGE_OFFSET)
12812 return 0;
12813 if (probe_kernel_address((unsigned short *)ip, ud2))
12814 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12815
12816 return ud2 == 0x0b0f;
12817 }
12818 +
12819 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12820 +void pax_check_alloca(unsigned long size)
12821 +{
12822 + unsigned long sp = (unsigned long)&sp, stack_left;
12823 +
12824 + /* all kernel stacks are of the same size */
12825 + stack_left = sp & (THREAD_SIZE - 1);
12826 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12827 +}
12828 +EXPORT_SYMBOL(pax_check_alloca);
12829 +#endif
12830 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12831 index 6d728d9..279514e 100644
12832 --- a/arch/x86/kernel/dumpstack_64.c
12833 +++ b/arch/x86/kernel/dumpstack_64.c
12834 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12835 unsigned long *irq_stack_end =
12836 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12837 unsigned used = 0;
12838 - struct thread_info *tinfo;
12839 int graph = 0;
12840 unsigned long dummy;
12841 + void *stack_start;
12842
12843 if (!task)
12844 task = current;
12845 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12846 * current stack address. If the stacks consist of nested
12847 * exceptions
12848 */
12849 - tinfo = task_thread_info(task);
12850 for (;;) {
12851 char *id;
12852 unsigned long *estack_end;
12853 +
12854 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12855 &used, &id);
12856
12857 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12858 if (ops->stack(data, id) < 0)
12859 break;
12860
12861 - bp = ops->walk_stack(tinfo, stack, bp, ops,
12862 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12863 data, estack_end, &graph);
12864 ops->stack(data, "<EOE>");
12865 /*
12866 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12867 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12868 if (ops->stack(data, "IRQ") < 0)
12869 break;
12870 - bp = ops->walk_stack(tinfo, stack, bp,
12871 + bp = ops->walk_stack(task, irq_stack, stack, bp,
12872 ops, data, irq_stack_end, &graph);
12873 /*
12874 * We link to the next stack (which would be
12875 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12876 /*
12877 * This handles the process stack:
12878 */
12879 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12880 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12881 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12882 put_cpu();
12883 }
12884 EXPORT_SYMBOL(dump_trace);
12885 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12886
12887 return ud2 == 0x0b0f;
12888 }
12889 +
12890 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12891 +void pax_check_alloca(unsigned long size)
12892 +{
12893 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12894 + unsigned cpu, used;
12895 + char *id;
12896 +
12897 + /* check the process stack first */
12898 + stack_start = (unsigned long)task_stack_page(current);
12899 + stack_end = stack_start + THREAD_SIZE;
12900 + if (likely(stack_start <= sp && sp < stack_end)) {
12901 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
12902 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12903 + return;
12904 + }
12905 +
12906 + cpu = get_cpu();
12907 +
12908 + /* check the irq stacks */
12909 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12910 + stack_start = stack_end - IRQ_STACK_SIZE;
12911 + if (stack_start <= sp && sp < stack_end) {
12912 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12913 + put_cpu();
12914 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12915 + return;
12916 + }
12917 +
12918 + /* check the exception stacks */
12919 + used = 0;
12920 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12921 + stack_start = stack_end - EXCEPTION_STKSZ;
12922 + if (stack_end && stack_start <= sp && sp < stack_end) {
12923 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12924 + put_cpu();
12925 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12926 + return;
12927 + }
12928 +
12929 + put_cpu();
12930 +
12931 + /* unknown stack */
12932 + BUG();
12933 +}
12934 +EXPORT_SYMBOL(pax_check_alloca);
12935 +#endif
12936 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12937 index cd28a35..c72ed9a 100644
12938 --- a/arch/x86/kernel/early_printk.c
12939 +++ b/arch/x86/kernel/early_printk.c
12940 @@ -7,6 +7,7 @@
12941 #include <linux/pci_regs.h>
12942 #include <linux/pci_ids.h>
12943 #include <linux/errno.h>
12944 +#include <linux/sched.h>
12945 #include <asm/io.h>
12946 #include <asm/processor.h>
12947 #include <asm/fcntl.h>
12948 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12949 index f3f6f53..0841b66 100644
12950 --- a/arch/x86/kernel/entry_32.S
12951 +++ b/arch/x86/kernel/entry_32.S
12952 @@ -186,13 +186,146 @@
12953 /*CFI_REL_OFFSET gs, PT_GS*/
12954 .endm
12955 .macro SET_KERNEL_GS reg
12956 +
12957 +#ifdef CONFIG_CC_STACKPROTECTOR
12958 movl $(__KERNEL_STACK_CANARY), \reg
12959 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12960 + movl $(__USER_DS), \reg
12961 +#else
12962 + xorl \reg, \reg
12963 +#endif
12964 +
12965 movl \reg, %gs
12966 .endm
12967
12968 #endif /* CONFIG_X86_32_LAZY_GS */
12969
12970 -.macro SAVE_ALL
12971 +.macro pax_enter_kernel
12972 +#ifdef CONFIG_PAX_KERNEXEC
12973 + call pax_enter_kernel
12974 +#endif
12975 +.endm
12976 +
12977 +.macro pax_exit_kernel
12978 +#ifdef CONFIG_PAX_KERNEXEC
12979 + call pax_exit_kernel
12980 +#endif
12981 +.endm
12982 +
12983 +#ifdef CONFIG_PAX_KERNEXEC
12984 +ENTRY(pax_enter_kernel)
12985 +#ifdef CONFIG_PARAVIRT
12986 + pushl %eax
12987 + pushl %ecx
12988 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12989 + mov %eax, %esi
12990 +#else
12991 + mov %cr0, %esi
12992 +#endif
12993 + bts $16, %esi
12994 + jnc 1f
12995 + mov %cs, %esi
12996 + cmp $__KERNEL_CS, %esi
12997 + jz 3f
12998 + ljmp $__KERNEL_CS, $3f
12999 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13000 +2:
13001 +#ifdef CONFIG_PARAVIRT
13002 + mov %esi, %eax
13003 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13004 +#else
13005 + mov %esi, %cr0
13006 +#endif
13007 +3:
13008 +#ifdef CONFIG_PARAVIRT
13009 + popl %ecx
13010 + popl %eax
13011 +#endif
13012 + ret
13013 +ENDPROC(pax_enter_kernel)
13014 +
13015 +ENTRY(pax_exit_kernel)
13016 +#ifdef CONFIG_PARAVIRT
13017 + pushl %eax
13018 + pushl %ecx
13019 +#endif
13020 + mov %cs, %esi
13021 + cmp $__KERNEXEC_KERNEL_CS, %esi
13022 + jnz 2f
13023 +#ifdef CONFIG_PARAVIRT
13024 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13025 + mov %eax, %esi
13026 +#else
13027 + mov %cr0, %esi
13028 +#endif
13029 + btr $16, %esi
13030 + ljmp $__KERNEL_CS, $1f
13031 +1:
13032 +#ifdef CONFIG_PARAVIRT
13033 + mov %esi, %eax
13034 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13035 +#else
13036 + mov %esi, %cr0
13037 +#endif
13038 +2:
13039 +#ifdef CONFIG_PARAVIRT
13040 + popl %ecx
13041 + popl %eax
13042 +#endif
13043 + ret
13044 +ENDPROC(pax_exit_kernel)
13045 +#endif
13046 +
13047 +.macro pax_erase_kstack
13048 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13049 + call pax_erase_kstack
13050 +#endif
13051 +.endm
13052 +
13053 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13054 +/*
13055 + * ebp: thread_info
13056 + * ecx, edx: can be clobbered
13057 + */
13058 +ENTRY(pax_erase_kstack)
13059 + pushl %edi
13060 + pushl %eax
13061 +
13062 + mov TI_lowest_stack(%ebp), %edi
13063 + mov $-0xBEEF, %eax
13064 + std
13065 +
13066 +1: mov %edi, %ecx
13067 + and $THREAD_SIZE_asm - 1, %ecx
13068 + shr $2, %ecx
13069 + repne scasl
13070 + jecxz 2f
13071 +
13072 + cmp $2*16, %ecx
13073 + jc 2f
13074 +
13075 + mov $2*16, %ecx
13076 + repe scasl
13077 + jecxz 2f
13078 + jne 1b
13079 +
13080 +2: cld
13081 + mov %esp, %ecx
13082 + sub %edi, %ecx
13083 + shr $2, %ecx
13084 + rep stosl
13085 +
13086 + mov TI_task_thread_sp0(%ebp), %edi
13087 + sub $128, %edi
13088 + mov %edi, TI_lowest_stack(%ebp)
13089 +
13090 + popl %eax
13091 + popl %edi
13092 + ret
13093 +ENDPROC(pax_erase_kstack)
13094 +#endif
13095 +
13096 +.macro __SAVE_ALL _DS
13097 cld
13098 PUSH_GS
13099 pushl_cfi %fs
13100 @@ -215,7 +348,7 @@
13101 CFI_REL_OFFSET ecx, 0
13102 pushl_cfi %ebx
13103 CFI_REL_OFFSET ebx, 0
13104 - movl $(__USER_DS), %edx
13105 + movl $\_DS, %edx
13106 movl %edx, %ds
13107 movl %edx, %es
13108 movl $(__KERNEL_PERCPU), %edx
13109 @@ -223,6 +356,15 @@
13110 SET_KERNEL_GS %edx
13111 .endm
13112
13113 +.macro SAVE_ALL
13114 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13115 + __SAVE_ALL __KERNEL_DS
13116 + pax_enter_kernel
13117 +#else
13118 + __SAVE_ALL __USER_DS
13119 +#endif
13120 +.endm
13121 +
13122 .macro RESTORE_INT_REGS
13123 popl_cfi %ebx
13124 CFI_RESTORE ebx
13125 @@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
13126 popfl_cfi
13127 jmp syscall_exit
13128 CFI_ENDPROC
13129 -END(ret_from_fork)
13130 +ENDPROC(ret_from_fork)
13131
13132 /*
13133 * Interrupt exit functions should be protected against kprobes
13134 @@ -333,7 +475,15 @@ check_userspace:
13135 movb PT_CS(%esp), %al
13136 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13137 cmpl $USER_RPL, %eax
13138 +
13139 +#ifdef CONFIG_PAX_KERNEXEC
13140 + jae resume_userspace
13141 +
13142 + PAX_EXIT_KERNEL
13143 + jmp resume_kernel
13144 +#else
13145 jb resume_kernel # not returning to v8086 or userspace
13146 +#endif
13147
13148 ENTRY(resume_userspace)
13149 LOCKDEP_SYS_EXIT
13150 @@ -345,8 +495,8 @@ ENTRY(resume_userspace)
13151 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13152 # int/exception return?
13153 jne work_pending
13154 - jmp restore_all
13155 -END(ret_from_exception)
13156 + jmp restore_all_pax
13157 +ENDPROC(ret_from_exception)
13158
13159 #ifdef CONFIG_PREEMPT
13160 ENTRY(resume_kernel)
13161 @@ -361,7 +511,7 @@ need_resched:
13162 jz restore_all
13163 call preempt_schedule_irq
13164 jmp need_resched
13165 -END(resume_kernel)
13166 +ENDPROC(resume_kernel)
13167 #endif
13168 CFI_ENDPROC
13169 /*
13170 @@ -395,23 +545,34 @@ sysenter_past_esp:
13171 /*CFI_REL_OFFSET cs, 0*/
13172 /*
13173 * Push current_thread_info()->sysenter_return to the stack.
13174 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13175 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
13176 */
13177 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
13178 + pushl_cfi $0
13179 CFI_REL_OFFSET eip, 0
13180
13181 pushl_cfi %eax
13182 SAVE_ALL
13183 + GET_THREAD_INFO(%ebp)
13184 + movl TI_sysenter_return(%ebp),%ebp
13185 + movl %ebp,PT_EIP(%esp)
13186 ENABLE_INTERRUPTS(CLBR_NONE)
13187
13188 /*
13189 * Load the potential sixth argument from user stack.
13190 * Careful about security.
13191 */
13192 + movl PT_OLDESP(%esp),%ebp
13193 +
13194 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13195 + mov PT_OLDSS(%esp),%ds
13196 +1: movl %ds:(%ebp),%ebp
13197 + push %ss
13198 + pop %ds
13199 +#else
13200 cmpl $__PAGE_OFFSET-3,%ebp
13201 jae syscall_fault
13202 1: movl (%ebp),%ebp
13203 +#endif
13204 +
13205 movl %ebp,PT_EBP(%esp)
13206 .section __ex_table,"a"
13207 .align 4
13208 @@ -434,12 +595,24 @@ sysenter_do_call:
13209 testl $_TIF_ALLWORK_MASK, %ecx
13210 jne sysexit_audit
13211 sysenter_exit:
13212 +
13213 +#ifdef CONFIG_PAX_RANDKSTACK
13214 + pushl_cfi %eax
13215 + movl %esp, %eax
13216 + call pax_randomize_kstack
13217 + popl_cfi %eax
13218 +#endif
13219 +
13220 + pax_erase_kstack
13221 +
13222 /* if something modifies registers it must also disable sysexit */
13223 movl PT_EIP(%esp), %edx
13224 movl PT_OLDESP(%esp), %ecx
13225 xorl %ebp,%ebp
13226 TRACE_IRQS_ON
13227 1: mov PT_FS(%esp), %fs
13228 +2: mov PT_DS(%esp), %ds
13229 +3: mov PT_ES(%esp), %es
13230 PTGS_TO_GS
13231 ENABLE_INTERRUPTS_SYSEXIT
13232
13233 @@ -456,6 +629,9 @@ sysenter_audit:
13234 movl %eax,%edx /* 2nd arg: syscall number */
13235 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13236 call audit_syscall_entry
13237 +
13238 + pax_erase_kstack
13239 +
13240 pushl_cfi %ebx
13241 movl PT_EAX(%esp),%eax /* reload syscall number */
13242 jmp sysenter_do_call
13243 @@ -482,11 +658,17 @@ sysexit_audit:
13244
13245 CFI_ENDPROC
13246 .pushsection .fixup,"ax"
13247 -2: movl $0,PT_FS(%esp)
13248 +4: movl $0,PT_FS(%esp)
13249 + jmp 1b
13250 +5: movl $0,PT_DS(%esp)
13251 + jmp 1b
13252 +6: movl $0,PT_ES(%esp)
13253 jmp 1b
13254 .section __ex_table,"a"
13255 .align 4
13256 - .long 1b,2b
13257 + .long 1b,4b
13258 + .long 2b,5b
13259 + .long 3b,6b
13260 .popsection
13261 PTGS_TO_GS_EX
13262 ENDPROC(ia32_sysenter_target)
13263 @@ -519,6 +701,15 @@ syscall_exit:
13264 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13265 jne syscall_exit_work
13266
13267 +restore_all_pax:
13268 +
13269 +#ifdef CONFIG_PAX_RANDKSTACK
13270 + movl %esp, %eax
13271 + call pax_randomize_kstack
13272 +#endif
13273 +
13274 + pax_erase_kstack
13275 +
13276 restore_all:
13277 TRACE_IRQS_IRET
13278 restore_all_notrace:
13279 @@ -578,14 +769,34 @@ ldt_ss:
13280 * compensating for the offset by changing to the ESPFIX segment with
13281 * a base address that matches for the difference.
13282 */
13283 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
13284 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
13285 mov %esp, %edx /* load kernel esp */
13286 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13287 mov %dx, %ax /* eax: new kernel esp */
13288 sub %eax, %edx /* offset (low word is 0) */
13289 +#ifdef CONFIG_SMP
13290 + movl PER_CPU_VAR(cpu_number), %ebx
13291 + shll $PAGE_SHIFT_asm, %ebx
13292 + addl $cpu_gdt_table, %ebx
13293 +#else
13294 + movl $cpu_gdt_table, %ebx
13295 +#endif
13296 shr $16, %edx
13297 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
13298 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
13299 +
13300 +#ifdef CONFIG_PAX_KERNEXEC
13301 + mov %cr0, %esi
13302 + btr $16, %esi
13303 + mov %esi, %cr0
13304 +#endif
13305 +
13306 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13307 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13308 +
13309 +#ifdef CONFIG_PAX_KERNEXEC
13310 + bts $16, %esi
13311 + mov %esi, %cr0
13312 +#endif
13313 +
13314 pushl_cfi $__ESPFIX_SS
13315 pushl_cfi %eax /* new kernel esp */
13316 /* Disable interrupts, but do not irqtrace this section: we
13317 @@ -614,34 +825,28 @@ work_resched:
13318 movl TI_flags(%ebp), %ecx
13319 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13320 # than syscall tracing?
13321 - jz restore_all
13322 + jz restore_all_pax
13323 testb $_TIF_NEED_RESCHED, %cl
13324 jnz work_resched
13325
13326 work_notifysig: # deal with pending signals and
13327 # notify-resume requests
13328 + movl %esp, %eax
13329 #ifdef CONFIG_VM86
13330 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13331 - movl %esp, %eax
13332 - jne work_notifysig_v86 # returning to kernel-space or
13333 + jz 1f # returning to kernel-space or
13334 # vm86-space
13335 - xorl %edx, %edx
13336 - call do_notify_resume
13337 - jmp resume_userspace_sig
13338
13339 - ALIGN
13340 -work_notifysig_v86:
13341 pushl_cfi %ecx # save ti_flags for do_notify_resume
13342 call save_v86_state # %eax contains pt_regs pointer
13343 popl_cfi %ecx
13344 movl %eax, %esp
13345 -#else
13346 - movl %esp, %eax
13347 +1:
13348 #endif
13349 xorl %edx, %edx
13350 call do_notify_resume
13351 jmp resume_userspace_sig
13352 -END(work_pending)
13353 +ENDPROC(work_pending)
13354
13355 # perform syscall exit tracing
13356 ALIGN
13357 @@ -649,11 +854,14 @@ syscall_trace_entry:
13358 movl $-ENOSYS,PT_EAX(%esp)
13359 movl %esp, %eax
13360 call syscall_trace_enter
13361 +
13362 + pax_erase_kstack
13363 +
13364 /* What it returned is what we'll actually use. */
13365 cmpl $(nr_syscalls), %eax
13366 jnae syscall_call
13367 jmp syscall_exit
13368 -END(syscall_trace_entry)
13369 +ENDPROC(syscall_trace_entry)
13370
13371 # perform syscall exit tracing
13372 ALIGN
13373 @@ -666,20 +874,24 @@ syscall_exit_work:
13374 movl %esp, %eax
13375 call syscall_trace_leave
13376 jmp resume_userspace
13377 -END(syscall_exit_work)
13378 +ENDPROC(syscall_exit_work)
13379 CFI_ENDPROC
13380
13381 RING0_INT_FRAME # can't unwind into user space anyway
13382 syscall_fault:
13383 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13384 + push %ss
13385 + pop %ds
13386 +#endif
13387 GET_THREAD_INFO(%ebp)
13388 movl $-EFAULT,PT_EAX(%esp)
13389 jmp resume_userspace
13390 -END(syscall_fault)
13391 +ENDPROC(syscall_fault)
13392
13393 syscall_badsys:
13394 movl $-ENOSYS,PT_EAX(%esp)
13395 jmp resume_userspace
13396 -END(syscall_badsys)
13397 +ENDPROC(syscall_badsys)
13398 CFI_ENDPROC
13399 /*
13400 * End of kprobes section
13401 @@ -753,6 +965,36 @@ ptregs_clone:
13402 CFI_ENDPROC
13403 ENDPROC(ptregs_clone)
13404
13405 + ALIGN;
13406 +ENTRY(kernel_execve)
13407 + CFI_STARTPROC
13408 + pushl_cfi %ebp
13409 + sub $PT_OLDSS+4,%esp
13410 + pushl_cfi %edi
13411 + pushl_cfi %ecx
13412 + pushl_cfi %eax
13413 + lea 3*4(%esp),%edi
13414 + mov $PT_OLDSS/4+1,%ecx
13415 + xorl %eax,%eax
13416 + rep stosl
13417 + popl_cfi %eax
13418 + popl_cfi %ecx
13419 + popl_cfi %edi
13420 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13421 + pushl_cfi %esp
13422 + call sys_execve
13423 + add $4,%esp
13424 + CFI_ADJUST_CFA_OFFSET -4
13425 + GET_THREAD_INFO(%ebp)
13426 + test %eax,%eax
13427 + jz syscall_exit
13428 + add $PT_OLDSS+4,%esp
13429 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13430 + popl_cfi %ebp
13431 + ret
13432 + CFI_ENDPROC
13433 +ENDPROC(kernel_execve)
13434 +
13435 .macro FIXUP_ESPFIX_STACK
13436 /*
13437 * Switch back for ESPFIX stack to the normal zerobased stack
13438 @@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13439 * normal stack and adjusts ESP with the matching offset.
13440 */
13441 /* fixup the stack */
13442 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13443 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13444 +#ifdef CONFIG_SMP
13445 + movl PER_CPU_VAR(cpu_number), %ebx
13446 + shll $PAGE_SHIFT_asm, %ebx
13447 + addl $cpu_gdt_table, %ebx
13448 +#else
13449 + movl $cpu_gdt_table, %ebx
13450 +#endif
13451 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13452 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13453 shl $16, %eax
13454 addl %esp, %eax /* the adjusted stack pointer */
13455 pushl_cfi $__KERNEL_DS
13456 @@ -816,7 +1065,7 @@ vector=vector+1
13457 .endr
13458 2: jmp common_interrupt
13459 .endr
13460 -END(irq_entries_start)
13461 +ENDPROC(irq_entries_start)
13462
13463 .previous
13464 END(interrupt)
13465 @@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13466 pushl_cfi $do_coprocessor_error
13467 jmp error_code
13468 CFI_ENDPROC
13469 -END(coprocessor_error)
13470 +ENDPROC(coprocessor_error)
13471
13472 ENTRY(simd_coprocessor_error)
13473 RING0_INT_FRAME
13474 @@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13475 #endif
13476 jmp error_code
13477 CFI_ENDPROC
13478 -END(simd_coprocessor_error)
13479 +ENDPROC(simd_coprocessor_error)
13480
13481 ENTRY(device_not_available)
13482 RING0_INT_FRAME
13483 @@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13484 pushl_cfi $do_device_not_available
13485 jmp error_code
13486 CFI_ENDPROC
13487 -END(device_not_available)
13488 +ENDPROC(device_not_available)
13489
13490 #ifdef CONFIG_PARAVIRT
13491 ENTRY(native_iret)
13492 @@ -902,12 +1151,12 @@ ENTRY(native_iret)
13493 .align 4
13494 .long native_iret, iret_exc
13495 .previous
13496 -END(native_iret)
13497 +ENDPROC(native_iret)
13498
13499 ENTRY(native_irq_enable_sysexit)
13500 sti
13501 sysexit
13502 -END(native_irq_enable_sysexit)
13503 +ENDPROC(native_irq_enable_sysexit)
13504 #endif
13505
13506 ENTRY(overflow)
13507 @@ -916,7 +1165,7 @@ ENTRY(overflow)
13508 pushl_cfi $do_overflow
13509 jmp error_code
13510 CFI_ENDPROC
13511 -END(overflow)
13512 +ENDPROC(overflow)
13513
13514 ENTRY(bounds)
13515 RING0_INT_FRAME
13516 @@ -924,7 +1173,7 @@ ENTRY(bounds)
13517 pushl_cfi $do_bounds
13518 jmp error_code
13519 CFI_ENDPROC
13520 -END(bounds)
13521 +ENDPROC(bounds)
13522
13523 ENTRY(invalid_op)
13524 RING0_INT_FRAME
13525 @@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13526 pushl_cfi $do_invalid_op
13527 jmp error_code
13528 CFI_ENDPROC
13529 -END(invalid_op)
13530 +ENDPROC(invalid_op)
13531
13532 ENTRY(coprocessor_segment_overrun)
13533 RING0_INT_FRAME
13534 @@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13535 pushl_cfi $do_coprocessor_segment_overrun
13536 jmp error_code
13537 CFI_ENDPROC
13538 -END(coprocessor_segment_overrun)
13539 +ENDPROC(coprocessor_segment_overrun)
13540
13541 ENTRY(invalid_TSS)
13542 RING0_EC_FRAME
13543 pushl_cfi $do_invalid_TSS
13544 jmp error_code
13545 CFI_ENDPROC
13546 -END(invalid_TSS)
13547 +ENDPROC(invalid_TSS)
13548
13549 ENTRY(segment_not_present)
13550 RING0_EC_FRAME
13551 pushl_cfi $do_segment_not_present
13552 jmp error_code
13553 CFI_ENDPROC
13554 -END(segment_not_present)
13555 +ENDPROC(segment_not_present)
13556
13557 ENTRY(stack_segment)
13558 RING0_EC_FRAME
13559 pushl_cfi $do_stack_segment
13560 jmp error_code
13561 CFI_ENDPROC
13562 -END(stack_segment)
13563 +ENDPROC(stack_segment)
13564
13565 ENTRY(alignment_check)
13566 RING0_EC_FRAME
13567 pushl_cfi $do_alignment_check
13568 jmp error_code
13569 CFI_ENDPROC
13570 -END(alignment_check)
13571 +ENDPROC(alignment_check)
13572
13573 ENTRY(divide_error)
13574 RING0_INT_FRAME
13575 @@ -976,7 +1225,7 @@ ENTRY(divide_error)
13576 pushl_cfi $do_divide_error
13577 jmp error_code
13578 CFI_ENDPROC
13579 -END(divide_error)
13580 +ENDPROC(divide_error)
13581
13582 #ifdef CONFIG_X86_MCE
13583 ENTRY(machine_check)
13584 @@ -985,7 +1234,7 @@ ENTRY(machine_check)
13585 pushl_cfi machine_check_vector
13586 jmp error_code
13587 CFI_ENDPROC
13588 -END(machine_check)
13589 +ENDPROC(machine_check)
13590 #endif
13591
13592 ENTRY(spurious_interrupt_bug)
13593 @@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13594 pushl_cfi $do_spurious_interrupt_bug
13595 jmp error_code
13596 CFI_ENDPROC
13597 -END(spurious_interrupt_bug)
13598 +ENDPROC(spurious_interrupt_bug)
13599 /*
13600 * End of kprobes section
13601 */
13602 @@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13603
13604 ENTRY(mcount)
13605 ret
13606 -END(mcount)
13607 +ENDPROC(mcount)
13608
13609 ENTRY(ftrace_caller)
13610 cmpl $0, function_trace_stop
13611 @@ -1138,7 +1387,7 @@ ftrace_graph_call:
13612 .globl ftrace_stub
13613 ftrace_stub:
13614 ret
13615 -END(ftrace_caller)
13616 +ENDPROC(ftrace_caller)
13617
13618 #else /* ! CONFIG_DYNAMIC_FTRACE */
13619
13620 @@ -1174,7 +1423,7 @@ trace:
13621 popl %ecx
13622 popl %eax
13623 jmp ftrace_stub
13624 -END(mcount)
13625 +ENDPROC(mcount)
13626 #endif /* CONFIG_DYNAMIC_FTRACE */
13627 #endif /* CONFIG_FUNCTION_TRACER */
13628
13629 @@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13630 popl %ecx
13631 popl %eax
13632 ret
13633 -END(ftrace_graph_caller)
13634 +ENDPROC(ftrace_graph_caller)
13635
13636 .globl return_to_handler
13637 return_to_handler:
13638 @@ -1209,7 +1458,6 @@ return_to_handler:
13639 jmp *%ecx
13640 #endif
13641
13642 -.section .rodata,"a"
13643 #include "syscall_table_32.S"
13644
13645 syscall_table_size=(.-sys_call_table)
13646 @@ -1255,15 +1503,18 @@ error_code:
13647 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13648 REG_TO_PTGS %ecx
13649 SET_KERNEL_GS %ecx
13650 - movl $(__USER_DS), %ecx
13651 + movl $(__KERNEL_DS), %ecx
13652 movl %ecx, %ds
13653 movl %ecx, %es
13654 +
13655 + pax_enter_kernel
13656 +
13657 TRACE_IRQS_OFF
13658 movl %esp,%eax # pt_regs pointer
13659 call *%edi
13660 jmp ret_from_exception
13661 CFI_ENDPROC
13662 -END(page_fault)
13663 +ENDPROC(page_fault)
13664
13665 /*
13666 * Debug traps and NMI can happen at the one SYSENTER instruction
13667 @@ -1305,7 +1556,7 @@ debug_stack_correct:
13668 call do_debug
13669 jmp ret_from_exception
13670 CFI_ENDPROC
13671 -END(debug)
13672 +ENDPROC(debug)
13673
13674 /*
13675 * NMI is doubly nasty. It can happen _while_ we're handling
13676 @@ -1342,6 +1593,9 @@ nmi_stack_correct:
13677 xorl %edx,%edx # zero error code
13678 movl %esp,%eax # pt_regs pointer
13679 call do_nmi
13680 +
13681 + pax_exit_kernel
13682 +
13683 jmp restore_all_notrace
13684 CFI_ENDPROC
13685
13686 @@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13687 FIXUP_ESPFIX_STACK # %eax == %esp
13688 xorl %edx,%edx # zero error code
13689 call do_nmi
13690 +
13691 + pax_exit_kernel
13692 +
13693 RESTORE_REGS
13694 lss 12+4(%esp), %esp # back to espfix stack
13695 CFI_ADJUST_CFA_OFFSET -24
13696 jmp irq_return
13697 CFI_ENDPROC
13698 -END(nmi)
13699 +ENDPROC(nmi)
13700
13701 ENTRY(int3)
13702 RING0_INT_FRAME
13703 @@ -1395,14 +1652,14 @@ ENTRY(int3)
13704 call do_int3
13705 jmp ret_from_exception
13706 CFI_ENDPROC
13707 -END(int3)
13708 +ENDPROC(int3)
13709
13710 ENTRY(general_protection)
13711 RING0_EC_FRAME
13712 pushl_cfi $do_general_protection
13713 jmp error_code
13714 CFI_ENDPROC
13715 -END(general_protection)
13716 +ENDPROC(general_protection)
13717
13718 #ifdef CONFIG_KVM_GUEST
13719 ENTRY(async_page_fault)
13720 @@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13721 pushl_cfi $do_async_page_fault
13722 jmp error_code
13723 CFI_ENDPROC
13724 -END(async_page_fault)
13725 +ENDPROC(async_page_fault)
13726 #endif
13727
13728 /*
13729 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13730 index faf8d5e..4f16a68 100644
13731 --- a/arch/x86/kernel/entry_64.S
13732 +++ b/arch/x86/kernel/entry_64.S
13733 @@ -55,6 +55,8 @@
13734 #include <asm/paravirt.h>
13735 #include <asm/ftrace.h>
13736 #include <asm/percpu.h>
13737 +#include <asm/pgtable.h>
13738 +#include <asm/alternative-asm.h>
13739
13740 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13741 #include <linux/elf-em.h>
13742 @@ -68,8 +70,9 @@
13743 #ifdef CONFIG_FUNCTION_TRACER
13744 #ifdef CONFIG_DYNAMIC_FTRACE
13745 ENTRY(mcount)
13746 + pax_force_retaddr
13747 retq
13748 -END(mcount)
13749 +ENDPROC(mcount)
13750
13751 ENTRY(ftrace_caller)
13752 cmpl $0, function_trace_stop
13753 @@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13754 #endif
13755
13756 GLOBAL(ftrace_stub)
13757 + pax_force_retaddr
13758 retq
13759 -END(ftrace_caller)
13760 +ENDPROC(ftrace_caller)
13761
13762 #else /* ! CONFIG_DYNAMIC_FTRACE */
13763 ENTRY(mcount)
13764 @@ -112,6 +116,7 @@ ENTRY(mcount)
13765 #endif
13766
13767 GLOBAL(ftrace_stub)
13768 + pax_force_retaddr
13769 retq
13770
13771 trace:
13772 @@ -121,12 +126,13 @@ trace:
13773 movq 8(%rbp), %rsi
13774 subq $MCOUNT_INSN_SIZE, %rdi
13775
13776 + pax_force_fptr ftrace_trace_function
13777 call *ftrace_trace_function
13778
13779 MCOUNT_RESTORE_FRAME
13780
13781 jmp ftrace_stub
13782 -END(mcount)
13783 +ENDPROC(mcount)
13784 #endif /* CONFIG_DYNAMIC_FTRACE */
13785 #endif /* CONFIG_FUNCTION_TRACER */
13786
13787 @@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13788
13789 MCOUNT_RESTORE_FRAME
13790
13791 + pax_force_retaddr
13792 retq
13793 -END(ftrace_graph_caller)
13794 +ENDPROC(ftrace_graph_caller)
13795
13796 GLOBAL(return_to_handler)
13797 subq $24, %rsp
13798 @@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13799 movq 8(%rsp), %rdx
13800 movq (%rsp), %rax
13801 addq $24, %rsp
13802 + pax_force_fptr %rdi
13803 jmp *%rdi
13804 #endif
13805
13806 @@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13807 ENDPROC(native_usergs_sysret64)
13808 #endif /* CONFIG_PARAVIRT */
13809
13810 + .macro ljmpq sel, off
13811 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13812 + .byte 0x48; ljmp *1234f(%rip)
13813 + .pushsection .rodata
13814 + .align 16
13815 + 1234: .quad \off; .word \sel
13816 + .popsection
13817 +#else
13818 + pushq $\sel
13819 + pushq $\off
13820 + lretq
13821 +#endif
13822 + .endm
13823 +
13824 + .macro pax_enter_kernel
13825 + pax_set_fptr_mask
13826 +#ifdef CONFIG_PAX_KERNEXEC
13827 + call pax_enter_kernel
13828 +#endif
13829 + .endm
13830 +
13831 + .macro pax_exit_kernel
13832 +#ifdef CONFIG_PAX_KERNEXEC
13833 + call pax_exit_kernel
13834 +#endif
13835 + .endm
13836 +
13837 +#ifdef CONFIG_PAX_KERNEXEC
13838 +ENTRY(pax_enter_kernel)
13839 + pushq %rdi
13840 +
13841 +#ifdef CONFIG_PARAVIRT
13842 + PV_SAVE_REGS(CLBR_RDI)
13843 +#endif
13844 +
13845 + GET_CR0_INTO_RDI
13846 + bts $16,%rdi
13847 + jnc 3f
13848 + mov %cs,%edi
13849 + cmp $__KERNEL_CS,%edi
13850 + jnz 2f
13851 +1:
13852 +
13853 +#ifdef CONFIG_PARAVIRT
13854 + PV_RESTORE_REGS(CLBR_RDI)
13855 +#endif
13856 +
13857 + popq %rdi
13858 + pax_force_retaddr
13859 + retq
13860 +
13861 +2: ljmpq __KERNEL_CS,1f
13862 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
13863 +4: SET_RDI_INTO_CR0
13864 + jmp 1b
13865 +ENDPROC(pax_enter_kernel)
13866 +
13867 +ENTRY(pax_exit_kernel)
13868 + pushq %rdi
13869 +
13870 +#ifdef CONFIG_PARAVIRT
13871 + PV_SAVE_REGS(CLBR_RDI)
13872 +#endif
13873 +
13874 + mov %cs,%rdi
13875 + cmp $__KERNEXEC_KERNEL_CS,%edi
13876 + jz 2f
13877 +1:
13878 +
13879 +#ifdef CONFIG_PARAVIRT
13880 + PV_RESTORE_REGS(CLBR_RDI);
13881 +#endif
13882 +
13883 + popq %rdi
13884 + pax_force_retaddr
13885 + retq
13886 +
13887 +2: GET_CR0_INTO_RDI
13888 + btr $16,%rdi
13889 + ljmpq __KERNEL_CS,3f
13890 +3: SET_RDI_INTO_CR0
13891 + jmp 1b
13892 +#ifdef CONFIG_PARAVIRT
13893 + PV_RESTORE_REGS(CLBR_RDI);
13894 +#endif
13895 +
13896 + popq %rdi
13897 + pax_force_retaddr
13898 + retq
13899 +ENDPROC(pax_exit_kernel)
13900 +#endif
13901 +
13902 + .macro pax_enter_kernel_user
13903 + pax_set_fptr_mask
13904 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13905 + call pax_enter_kernel_user
13906 +#endif
13907 + .endm
13908 +
13909 + .macro pax_exit_kernel_user
13910 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13911 + call pax_exit_kernel_user
13912 +#endif
13913 +#ifdef CONFIG_PAX_RANDKSTACK
13914 + pushq %rax
13915 + call pax_randomize_kstack
13916 + popq %rax
13917 +#endif
13918 + .endm
13919 +
13920 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13921 +ENTRY(pax_enter_kernel_user)
13922 + pushq %rdi
13923 + pushq %rbx
13924 +
13925 +#ifdef CONFIG_PARAVIRT
13926 + PV_SAVE_REGS(CLBR_RDI)
13927 +#endif
13928 +
13929 + GET_CR3_INTO_RDI
13930 + mov %rdi,%rbx
13931 + add $__START_KERNEL_map,%rbx
13932 + sub phys_base(%rip),%rbx
13933 +
13934 +#ifdef CONFIG_PARAVIRT
13935 + pushq %rdi
13936 + cmpl $0, pv_info+PARAVIRT_enabled
13937 + jz 1f
13938 + i = 0
13939 + .rept USER_PGD_PTRS
13940 + mov i*8(%rbx),%rsi
13941 + mov $0,%sil
13942 + lea i*8(%rbx),%rdi
13943 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13944 + i = i + 1
13945 + .endr
13946 + jmp 2f
13947 +1:
13948 +#endif
13949 +
13950 + i = 0
13951 + .rept USER_PGD_PTRS
13952 + movb $0,i*8(%rbx)
13953 + i = i + 1
13954 + .endr
13955 +
13956 +#ifdef CONFIG_PARAVIRT
13957 +2: popq %rdi
13958 +#endif
13959 + SET_RDI_INTO_CR3
13960 +
13961 +#ifdef CONFIG_PAX_KERNEXEC
13962 + GET_CR0_INTO_RDI
13963 + bts $16,%rdi
13964 + SET_RDI_INTO_CR0
13965 +#endif
13966 +
13967 +#ifdef CONFIG_PARAVIRT
13968 + PV_RESTORE_REGS(CLBR_RDI)
13969 +#endif
13970 +
13971 + popq %rbx
13972 + popq %rdi
13973 + pax_force_retaddr
13974 + retq
13975 +ENDPROC(pax_enter_kernel_user)
13976 +
13977 +ENTRY(pax_exit_kernel_user)
13978 + push %rdi
13979 +
13980 +#ifdef CONFIG_PARAVIRT
13981 + pushq %rbx
13982 + PV_SAVE_REGS(CLBR_RDI)
13983 +#endif
13984 +
13985 +#ifdef CONFIG_PAX_KERNEXEC
13986 + GET_CR0_INTO_RDI
13987 + btr $16,%rdi
13988 + SET_RDI_INTO_CR0
13989 +#endif
13990 +
13991 + GET_CR3_INTO_RDI
13992 + add $__START_KERNEL_map,%rdi
13993 + sub phys_base(%rip),%rdi
13994 +
13995 +#ifdef CONFIG_PARAVIRT
13996 + cmpl $0, pv_info+PARAVIRT_enabled
13997 + jz 1f
13998 + mov %rdi,%rbx
13999 + i = 0
14000 + .rept USER_PGD_PTRS
14001 + mov i*8(%rbx),%rsi
14002 + mov $0x67,%sil
14003 + lea i*8(%rbx),%rdi
14004 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
14005 + i = i + 1
14006 + .endr
14007 + jmp 2f
14008 +1:
14009 +#endif
14010 +
14011 + i = 0
14012 + .rept USER_PGD_PTRS
14013 + movb $0x67,i*8(%rdi)
14014 + i = i + 1
14015 + .endr
14016 +
14017 +#ifdef CONFIG_PARAVIRT
14018 +2: PV_RESTORE_REGS(CLBR_RDI)
14019 + popq %rbx
14020 +#endif
14021 +
14022 + popq %rdi
14023 + pax_force_retaddr
14024 + retq
14025 +ENDPROC(pax_exit_kernel_user)
14026 +#endif
14027 +
14028 +.macro pax_erase_kstack
14029 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14030 + call pax_erase_kstack
14031 +#endif
14032 +.endm
14033 +
14034 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14035 +/*
14036 + * r11: thread_info
14037 + * rcx, rdx: can be clobbered
14038 + */
14039 +ENTRY(pax_erase_kstack)
14040 + pushq %rdi
14041 + pushq %rax
14042 + pushq %r11
14043 +
14044 + GET_THREAD_INFO(%r11)
14045 + mov TI_lowest_stack(%r11), %rdi
14046 + mov $-0xBEEF, %rax
14047 + std
14048 +
14049 +1: mov %edi, %ecx
14050 + and $THREAD_SIZE_asm - 1, %ecx
14051 + shr $3, %ecx
14052 + repne scasq
14053 + jecxz 2f
14054 +
14055 + cmp $2*8, %ecx
14056 + jc 2f
14057 +
14058 + mov $2*8, %ecx
14059 + repe scasq
14060 + jecxz 2f
14061 + jne 1b
14062 +
14063 +2: cld
14064 + mov %esp, %ecx
14065 + sub %edi, %ecx
14066 +
14067 + cmp $THREAD_SIZE_asm, %rcx
14068 + jb 3f
14069 + ud2
14070 +3:
14071 +
14072 + shr $3, %ecx
14073 + rep stosq
14074 +
14075 + mov TI_task_thread_sp0(%r11), %rdi
14076 + sub $256, %rdi
14077 + mov %rdi, TI_lowest_stack(%r11)
14078 +
14079 + popq %r11
14080 + popq %rax
14081 + popq %rdi
14082 + pax_force_retaddr
14083 + ret
14084 +ENDPROC(pax_erase_kstack)
14085 +#endif
14086
14087 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
14088 #ifdef CONFIG_TRACE_IRQFLAGS
14089 @@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
14090 .endm
14091
14092 .macro UNFAKE_STACK_FRAME
14093 - addq $8*6, %rsp
14094 - CFI_ADJUST_CFA_OFFSET -(6*8)
14095 + addq $8*6 + ARG_SKIP, %rsp
14096 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
14097 .endm
14098
14099 /*
14100 @@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
14101 movq %rsp, %rsi
14102
14103 leaq -RBP(%rsp),%rdi /* arg1 for handler */
14104 - testl $3, CS(%rdi)
14105 + testb $3, CS(%rdi)
14106 je 1f
14107 SWAPGS
14108 /*
14109 @@ -355,9 +639,10 @@ ENTRY(save_rest)
14110 movq_cfi r15, R15+16
14111 movq %r11, 8(%rsp) /* return address */
14112 FIXUP_TOP_OF_STACK %r11, 16
14113 + pax_force_retaddr
14114 ret
14115 CFI_ENDPROC
14116 -END(save_rest)
14117 +ENDPROC(save_rest)
14118
14119 /* save complete stack frame */
14120 .pushsection .kprobes.text, "ax"
14121 @@ -386,9 +671,10 @@ ENTRY(save_paranoid)
14122 js 1f /* negative -> in kernel */
14123 SWAPGS
14124 xorl %ebx,%ebx
14125 -1: ret
14126 +1: pax_force_retaddr_bts
14127 + ret
14128 CFI_ENDPROC
14129 -END(save_paranoid)
14130 +ENDPROC(save_paranoid)
14131 .popsection
14132
14133 /*
14134 @@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
14135
14136 RESTORE_REST
14137
14138 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14139 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14140 je int_ret_from_sys_call
14141
14142 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
14143 @@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
14144 jmp ret_from_sys_call # go to the SYSRET fastpath
14145
14146 CFI_ENDPROC
14147 -END(ret_from_fork)
14148 +ENDPROC(ret_from_fork)
14149
14150 /*
14151 * System call entry. Up to 6 arguments in registers are supported.
14152 @@ -456,7 +742,7 @@ END(ret_from_fork)
14153 ENTRY(system_call)
14154 CFI_STARTPROC simple
14155 CFI_SIGNAL_FRAME
14156 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14157 + CFI_DEF_CFA rsp,0
14158 CFI_REGISTER rip,rcx
14159 /*CFI_REGISTER rflags,r11*/
14160 SWAPGS_UNSAFE_STACK
14161 @@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
14162
14163 movq %rsp,PER_CPU_VAR(old_rsp)
14164 movq PER_CPU_VAR(kernel_stack),%rsp
14165 + SAVE_ARGS 8*6,0
14166 + pax_enter_kernel_user
14167 /*
14168 * No need to follow this irqs off/on section - it's straight
14169 * and short:
14170 */
14171 ENABLE_INTERRUPTS(CLBR_NONE)
14172 - SAVE_ARGS 8,0
14173 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14174 movq %rcx,RIP-ARGOFFSET(%rsp)
14175 CFI_REL_OFFSET rip,RIP-ARGOFFSET
14176 @@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
14177 system_call_fastpath:
14178 cmpq $__NR_syscall_max,%rax
14179 ja badsys
14180 - movq %r10,%rcx
14181 + movq R10-ARGOFFSET(%rsp),%rcx
14182 call *sys_call_table(,%rax,8) # XXX: rip relative
14183 movq %rax,RAX-ARGOFFSET(%rsp)
14184 /*
14185 @@ -503,6 +790,8 @@ sysret_check:
14186 andl %edi,%edx
14187 jnz sysret_careful
14188 CFI_REMEMBER_STATE
14189 + pax_exit_kernel_user
14190 + pax_erase_kstack
14191 /*
14192 * sysretq will re-enable interrupts:
14193 */
14194 @@ -554,14 +843,18 @@ badsys:
14195 * jump back to the normal fast path.
14196 */
14197 auditsys:
14198 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
14199 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
14200 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
14201 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
14202 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
14203 movq %rax,%rsi /* 2nd arg: syscall number */
14204 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
14205 call audit_syscall_entry
14206 +
14207 + pax_erase_kstack
14208 +
14209 LOAD_ARGS 0 /* reload call-clobbered registers */
14210 + pax_set_fptr_mask
14211 jmp system_call_fastpath
14212
14213 /*
14214 @@ -591,16 +884,20 @@ tracesys:
14215 FIXUP_TOP_OF_STACK %rdi
14216 movq %rsp,%rdi
14217 call syscall_trace_enter
14218 +
14219 + pax_erase_kstack
14220 +
14221 /*
14222 * Reload arg registers from stack in case ptrace changed them.
14223 * We don't reload %rax because syscall_trace_enter() returned
14224 * the value it wants us to use in the table lookup.
14225 */
14226 LOAD_ARGS ARGOFFSET, 1
14227 + pax_set_fptr_mask
14228 RESTORE_REST
14229 cmpq $__NR_syscall_max,%rax
14230 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
14231 - movq %r10,%rcx /* fixup for C */
14232 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
14233 call *sys_call_table(,%rax,8)
14234 movq %rax,RAX-ARGOFFSET(%rsp)
14235 /* Use IRET because user could have changed frame */
14236 @@ -612,7 +909,7 @@ tracesys:
14237 GLOBAL(int_ret_from_sys_call)
14238 DISABLE_INTERRUPTS(CLBR_NONE)
14239 TRACE_IRQS_OFF
14240 - testl $3,CS-ARGOFFSET(%rsp)
14241 + testb $3,CS-ARGOFFSET(%rsp)
14242 je retint_restore_args
14243 movl $_TIF_ALLWORK_MASK,%edi
14244 /* edi: mask to check */
14245 @@ -623,6 +920,7 @@ GLOBAL(int_with_check)
14246 andl %edi,%edx
14247 jnz int_careful
14248 andl $~TS_COMPAT,TI_status(%rcx)
14249 + pax_erase_kstack
14250 jmp retint_swapgs
14251
14252 /* Either reschedule or signal or syscall exit tracking needed. */
14253 @@ -669,7 +967,7 @@ int_restore_rest:
14254 TRACE_IRQS_OFF
14255 jmp int_with_check
14256 CFI_ENDPROC
14257 -END(system_call)
14258 +ENDPROC(system_call)
14259
14260 /*
14261 * Certain special system calls that need to save a complete full stack frame.
14262 @@ -685,7 +983,7 @@ ENTRY(\label)
14263 call \func
14264 jmp ptregscall_common
14265 CFI_ENDPROC
14266 -END(\label)
14267 +ENDPROC(\label)
14268 .endm
14269
14270 PTREGSCALL stub_clone, sys_clone, %r8
14271 @@ -703,9 +1001,10 @@ ENTRY(ptregscall_common)
14272 movq_cfi_restore R12+8, r12
14273 movq_cfi_restore RBP+8, rbp
14274 movq_cfi_restore RBX+8, rbx
14275 + pax_force_retaddr
14276 ret $REST_SKIP /* pop extended registers */
14277 CFI_ENDPROC
14278 -END(ptregscall_common)
14279 +ENDPROC(ptregscall_common)
14280
14281 ENTRY(stub_execve)
14282 CFI_STARTPROC
14283 @@ -720,7 +1019,7 @@ ENTRY(stub_execve)
14284 RESTORE_REST
14285 jmp int_ret_from_sys_call
14286 CFI_ENDPROC
14287 -END(stub_execve)
14288 +ENDPROC(stub_execve)
14289
14290 /*
14291 * sigreturn is special because it needs to restore all registers on return.
14292 @@ -738,7 +1037,7 @@ ENTRY(stub_rt_sigreturn)
14293 RESTORE_REST
14294 jmp int_ret_from_sys_call
14295 CFI_ENDPROC
14296 -END(stub_rt_sigreturn)
14297 +ENDPROC(stub_rt_sigreturn)
14298
14299 /*
14300 * Build the entry stubs and pointer table with some assembler magic.
14301 @@ -773,7 +1072,7 @@ vector=vector+1
14302 2: jmp common_interrupt
14303 .endr
14304 CFI_ENDPROC
14305 -END(irq_entries_start)
14306 +ENDPROC(irq_entries_start)
14307
14308 .previous
14309 END(interrupt)
14310 @@ -793,6 +1092,16 @@ END(interrupt)
14311 subq $ORIG_RAX-RBP, %rsp
14312 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
14313 SAVE_ARGS_IRQ
14314 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14315 + testb $3, CS(%rdi)
14316 + jnz 1f
14317 + pax_enter_kernel
14318 + jmp 2f
14319 +1: pax_enter_kernel_user
14320 +2:
14321 +#else
14322 + pax_enter_kernel
14323 +#endif
14324 call \func
14325 .endm
14326
14327 @@ -824,7 +1133,7 @@ ret_from_intr:
14328
14329 exit_intr:
14330 GET_THREAD_INFO(%rcx)
14331 - testl $3,CS-ARGOFFSET(%rsp)
14332 + testb $3,CS-ARGOFFSET(%rsp)
14333 je retint_kernel
14334
14335 /* Interrupt came from user space */
14336 @@ -846,12 +1155,15 @@ retint_swapgs: /* return to user-space */
14337 * The iretq could re-enable interrupts:
14338 */
14339 DISABLE_INTERRUPTS(CLBR_ANY)
14340 + pax_exit_kernel_user
14341 TRACE_IRQS_IRETQ
14342 SWAPGS
14343 jmp restore_args
14344
14345 retint_restore_args: /* return to kernel space */
14346 DISABLE_INTERRUPTS(CLBR_ANY)
14347 + pax_exit_kernel
14348 + pax_force_retaddr RIP-ARGOFFSET
14349 /*
14350 * The iretq could re-enable interrupts:
14351 */
14352 @@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
14353 #endif
14354
14355 CFI_ENDPROC
14356 -END(common_interrupt)
14357 +ENDPROC(common_interrupt)
14358 /*
14359 * End of kprobes section
14360 */
14361 @@ -956,7 +1268,7 @@ ENTRY(\sym)
14362 interrupt \do_sym
14363 jmp ret_from_intr
14364 CFI_ENDPROC
14365 -END(\sym)
14366 +ENDPROC(\sym)
14367 .endm
14368
14369 #ifdef CONFIG_SMP
14370 @@ -1021,12 +1333,22 @@ ENTRY(\sym)
14371 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14372 call error_entry
14373 DEFAULT_FRAME 0
14374 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14375 + testb $3, CS(%rsp)
14376 + jnz 1f
14377 + pax_enter_kernel
14378 + jmp 2f
14379 +1: pax_enter_kernel_user
14380 +2:
14381 +#else
14382 + pax_enter_kernel
14383 +#endif
14384 movq %rsp,%rdi /* pt_regs pointer */
14385 xorl %esi,%esi /* no error code */
14386 call \do_sym
14387 jmp error_exit /* %ebx: no swapgs flag */
14388 CFI_ENDPROC
14389 -END(\sym)
14390 +ENDPROC(\sym)
14391 .endm
14392
14393 .macro paranoidzeroentry sym do_sym
14394 @@ -1038,15 +1360,25 @@ ENTRY(\sym)
14395 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14396 call save_paranoid
14397 TRACE_IRQS_OFF
14398 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14399 + testb $3, CS(%rsp)
14400 + jnz 1f
14401 + pax_enter_kernel
14402 + jmp 2f
14403 +1: pax_enter_kernel_user
14404 +2:
14405 +#else
14406 + pax_enter_kernel
14407 +#endif
14408 movq %rsp,%rdi /* pt_regs pointer */
14409 xorl %esi,%esi /* no error code */
14410 call \do_sym
14411 jmp paranoid_exit /* %ebx: no swapgs flag */
14412 CFI_ENDPROC
14413 -END(\sym)
14414 +ENDPROC(\sym)
14415 .endm
14416
14417 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14418 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14419 .macro paranoidzeroentry_ist sym do_sym ist
14420 ENTRY(\sym)
14421 INTR_FRAME
14422 @@ -1056,14 +1388,30 @@ ENTRY(\sym)
14423 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14424 call save_paranoid
14425 TRACE_IRQS_OFF
14426 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14427 + testb $3, CS(%rsp)
14428 + jnz 1f
14429 + pax_enter_kernel
14430 + jmp 2f
14431 +1: pax_enter_kernel_user
14432 +2:
14433 +#else
14434 + pax_enter_kernel
14435 +#endif
14436 movq %rsp,%rdi /* pt_regs pointer */
14437 xorl %esi,%esi /* no error code */
14438 +#ifdef CONFIG_SMP
14439 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14440 + lea init_tss(%r12), %r12
14441 +#else
14442 + lea init_tss(%rip), %r12
14443 +#endif
14444 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14445 call \do_sym
14446 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14447 jmp paranoid_exit /* %ebx: no swapgs flag */
14448 CFI_ENDPROC
14449 -END(\sym)
14450 +ENDPROC(\sym)
14451 .endm
14452
14453 .macro errorentry sym do_sym
14454 @@ -1074,13 +1422,23 @@ ENTRY(\sym)
14455 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14456 call error_entry
14457 DEFAULT_FRAME 0
14458 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14459 + testb $3, CS(%rsp)
14460 + jnz 1f
14461 + pax_enter_kernel
14462 + jmp 2f
14463 +1: pax_enter_kernel_user
14464 +2:
14465 +#else
14466 + pax_enter_kernel
14467 +#endif
14468 movq %rsp,%rdi /* pt_regs pointer */
14469 movq ORIG_RAX(%rsp),%rsi /* get error code */
14470 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14471 call \do_sym
14472 jmp error_exit /* %ebx: no swapgs flag */
14473 CFI_ENDPROC
14474 -END(\sym)
14475 +ENDPROC(\sym)
14476 .endm
14477
14478 /* error code is on the stack already */
14479 @@ -1093,13 +1451,23 @@ ENTRY(\sym)
14480 call save_paranoid
14481 DEFAULT_FRAME 0
14482 TRACE_IRQS_OFF
14483 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14484 + testb $3, CS(%rsp)
14485 + jnz 1f
14486 + pax_enter_kernel
14487 + jmp 2f
14488 +1: pax_enter_kernel_user
14489 +2:
14490 +#else
14491 + pax_enter_kernel
14492 +#endif
14493 movq %rsp,%rdi /* pt_regs pointer */
14494 movq ORIG_RAX(%rsp),%rsi /* get error code */
14495 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14496 call \do_sym
14497 jmp paranoid_exit /* %ebx: no swapgs flag */
14498 CFI_ENDPROC
14499 -END(\sym)
14500 +ENDPROC(\sym)
14501 .endm
14502
14503 zeroentry divide_error do_divide_error
14504 @@ -1129,9 +1497,10 @@ gs_change:
14505 2: mfence /* workaround */
14506 SWAPGS
14507 popfq_cfi
14508 + pax_force_retaddr
14509 ret
14510 CFI_ENDPROC
14511 -END(native_load_gs_index)
14512 +ENDPROC(native_load_gs_index)
14513
14514 .section __ex_table,"a"
14515 .align 8
14516 @@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
14517 * Here we are in the child and the registers are set as they were
14518 * at kernel_thread() invocation in the parent.
14519 */
14520 + pax_force_fptr %rsi
14521 call *%rsi
14522 # exit
14523 mov %eax, %edi
14524 call do_exit
14525 ud2 # padding for call trace
14526 CFI_ENDPROC
14527 -END(kernel_thread_helper)
14528 +ENDPROC(kernel_thread_helper)
14529
14530 /*
14531 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14532 @@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
14533 RESTORE_REST
14534 testq %rax,%rax
14535 je int_ret_from_sys_call
14536 - RESTORE_ARGS
14537 UNFAKE_STACK_FRAME
14538 + pax_force_retaddr
14539 ret
14540 CFI_ENDPROC
14541 -END(kernel_execve)
14542 +ENDPROC(kernel_execve)
14543
14544 /* Call softirq on interrupt stack. Interrupts are off. */
14545 ENTRY(call_softirq)
14546 @@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
14547 CFI_DEF_CFA_REGISTER rsp
14548 CFI_ADJUST_CFA_OFFSET -8
14549 decl PER_CPU_VAR(irq_count)
14550 + pax_force_retaddr
14551 ret
14552 CFI_ENDPROC
14553 -END(call_softirq)
14554 +ENDPROC(call_softirq)
14555
14556 #ifdef CONFIG_XEN
14557 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14558 @@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14559 decl PER_CPU_VAR(irq_count)
14560 jmp error_exit
14561 CFI_ENDPROC
14562 -END(xen_do_hypervisor_callback)
14563 +ENDPROC(xen_do_hypervisor_callback)
14564
14565 /*
14566 * Hypervisor uses this for application faults while it executes.
14567 @@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
14568 SAVE_ALL
14569 jmp error_exit
14570 CFI_ENDPROC
14571 -END(xen_failsafe_callback)
14572 +ENDPROC(xen_failsafe_callback)
14573
14574 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14575 xen_hvm_callback_vector xen_evtchn_do_upcall
14576 @@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
14577 TRACE_IRQS_OFF
14578 testl %ebx,%ebx /* swapgs needed? */
14579 jnz paranoid_restore
14580 - testl $3,CS(%rsp)
14581 + testb $3,CS(%rsp)
14582 jnz paranoid_userspace
14583 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14584 + pax_exit_kernel
14585 + TRACE_IRQS_IRETQ 0
14586 + SWAPGS_UNSAFE_STACK
14587 + RESTORE_ALL 8
14588 + pax_force_retaddr_bts
14589 + jmp irq_return
14590 +#endif
14591 paranoid_swapgs:
14592 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14593 + pax_exit_kernel_user
14594 +#else
14595 + pax_exit_kernel
14596 +#endif
14597 TRACE_IRQS_IRETQ 0
14598 SWAPGS_UNSAFE_STACK
14599 RESTORE_ALL 8
14600 jmp irq_return
14601 paranoid_restore:
14602 + pax_exit_kernel
14603 TRACE_IRQS_IRETQ 0
14604 RESTORE_ALL 8
14605 + pax_force_retaddr_bts
14606 jmp irq_return
14607 paranoid_userspace:
14608 GET_THREAD_INFO(%rcx)
14609 @@ -1394,7 +1780,7 @@ paranoid_schedule:
14610 TRACE_IRQS_OFF
14611 jmp paranoid_userspace
14612 CFI_ENDPROC
14613 -END(paranoid_exit)
14614 +ENDPROC(paranoid_exit)
14615
14616 /*
14617 * Exception entry point. This expects an error code/orig_rax on the stack.
14618 @@ -1421,12 +1807,13 @@ ENTRY(error_entry)
14619 movq_cfi r14, R14+8
14620 movq_cfi r15, R15+8
14621 xorl %ebx,%ebx
14622 - testl $3,CS+8(%rsp)
14623 + testb $3,CS+8(%rsp)
14624 je error_kernelspace
14625 error_swapgs:
14626 SWAPGS
14627 error_sti:
14628 TRACE_IRQS_OFF
14629 + pax_force_retaddr_bts
14630 ret
14631
14632 /*
14633 @@ -1453,7 +1840,7 @@ bstep_iret:
14634 movq %rcx,RIP+8(%rsp)
14635 jmp error_swapgs
14636 CFI_ENDPROC
14637 -END(error_entry)
14638 +ENDPROC(error_entry)
14639
14640
14641 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14642 @@ -1473,7 +1860,7 @@ ENTRY(error_exit)
14643 jnz retint_careful
14644 jmp retint_swapgs
14645 CFI_ENDPROC
14646 -END(error_exit)
14647 +ENDPROC(error_exit)
14648
14649
14650 /* runs on exception stack */
14651 @@ -1485,6 +1872,16 @@ ENTRY(nmi)
14652 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14653 call save_paranoid
14654 DEFAULT_FRAME 0
14655 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14656 + testb $3, CS(%rsp)
14657 + jnz 1f
14658 + pax_enter_kernel
14659 + jmp 2f
14660 +1: pax_enter_kernel_user
14661 +2:
14662 +#else
14663 + pax_enter_kernel
14664 +#endif
14665 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14666 movq %rsp,%rdi
14667 movq $-1,%rsi
14668 @@ -1495,12 +1892,28 @@ ENTRY(nmi)
14669 DISABLE_INTERRUPTS(CLBR_NONE)
14670 testl %ebx,%ebx /* swapgs needed? */
14671 jnz nmi_restore
14672 - testl $3,CS(%rsp)
14673 + testb $3,CS(%rsp)
14674 jnz nmi_userspace
14675 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14676 + pax_exit_kernel
14677 + SWAPGS_UNSAFE_STACK
14678 + RESTORE_ALL 8
14679 + pax_force_retaddr_bts
14680 + jmp irq_return
14681 +#endif
14682 nmi_swapgs:
14683 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14684 + pax_exit_kernel_user
14685 +#else
14686 + pax_exit_kernel
14687 +#endif
14688 SWAPGS_UNSAFE_STACK
14689 + RESTORE_ALL 8
14690 + jmp irq_return
14691 nmi_restore:
14692 + pax_exit_kernel
14693 RESTORE_ALL 8
14694 + pax_force_retaddr_bts
14695 jmp irq_return
14696 nmi_userspace:
14697 GET_THREAD_INFO(%rcx)
14698 @@ -1529,14 +1942,14 @@ nmi_schedule:
14699 jmp paranoid_exit
14700 CFI_ENDPROC
14701 #endif
14702 -END(nmi)
14703 +ENDPROC(nmi)
14704
14705 ENTRY(ignore_sysret)
14706 CFI_STARTPROC
14707 mov $-ENOSYS,%eax
14708 sysret
14709 CFI_ENDPROC
14710 -END(ignore_sysret)
14711 +ENDPROC(ignore_sysret)
14712
14713 /*
14714 * End of kprobes section
14715 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14716 index c9a281f..ce2f317 100644
14717 --- a/arch/x86/kernel/ftrace.c
14718 +++ b/arch/x86/kernel/ftrace.c
14719 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14720 static const void *mod_code_newcode; /* holds the text to write to the IP */
14721
14722 static unsigned nmi_wait_count;
14723 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14724 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14725
14726 int ftrace_arch_read_dyn_info(char *buf, int size)
14727 {
14728 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14729
14730 r = snprintf(buf, size, "%u %u",
14731 nmi_wait_count,
14732 - atomic_read(&nmi_update_count));
14733 + atomic_read_unchecked(&nmi_update_count));
14734 return r;
14735 }
14736
14737 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14738
14739 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14740 smp_rmb();
14741 + pax_open_kernel();
14742 ftrace_mod_code();
14743 - atomic_inc(&nmi_update_count);
14744 + pax_close_kernel();
14745 + atomic_inc_unchecked(&nmi_update_count);
14746 }
14747 /* Must have previous changes seen before executions */
14748 smp_mb();
14749 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14750 {
14751 unsigned char replaced[MCOUNT_INSN_SIZE];
14752
14753 + ip = ktla_ktva(ip);
14754 +
14755 /*
14756 * Note: Due to modules and __init, code can
14757 * disappear and change, we need to protect against faulting
14758 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14759 unsigned char old[MCOUNT_INSN_SIZE], *new;
14760 int ret;
14761
14762 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14763 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14764 new = ftrace_call_replace(ip, (unsigned long)func);
14765 ret = ftrace_modify_code(ip, old, new);
14766
14767 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14768 {
14769 unsigned char code[MCOUNT_INSN_SIZE];
14770
14771 + ip = ktla_ktva(ip);
14772 +
14773 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14774 return -EFAULT;
14775
14776 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14777 index 3bb0850..55a56f4 100644
14778 --- a/arch/x86/kernel/head32.c
14779 +++ b/arch/x86/kernel/head32.c
14780 @@ -19,6 +19,7 @@
14781 #include <asm/io_apic.h>
14782 #include <asm/bios_ebda.h>
14783 #include <asm/tlbflush.h>
14784 +#include <asm/boot.h>
14785
14786 static void __init i386_default_early_setup(void)
14787 {
14788 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14789 {
14790 memblock_init();
14791
14792 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14793 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14794
14795 #ifdef CONFIG_BLK_DEV_INITRD
14796 /* Reserve INITRD */
14797 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14798 index ce0be7c..c41476e 100644
14799 --- a/arch/x86/kernel/head_32.S
14800 +++ b/arch/x86/kernel/head_32.S
14801 @@ -25,6 +25,12 @@
14802 /* Physical address */
14803 #define pa(X) ((X) - __PAGE_OFFSET)
14804
14805 +#ifdef CONFIG_PAX_KERNEXEC
14806 +#define ta(X) (X)
14807 +#else
14808 +#define ta(X) ((X) - __PAGE_OFFSET)
14809 +#endif
14810 +
14811 /*
14812 * References to members of the new_cpu_data structure.
14813 */
14814 @@ -54,11 +60,7 @@
14815 * and small than max_low_pfn, otherwise will waste some page table entries
14816 */
14817
14818 -#if PTRS_PER_PMD > 1
14819 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14820 -#else
14821 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14822 -#endif
14823 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14824
14825 /* Number of possible pages in the lowmem region */
14826 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14827 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14828 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14829
14830 /*
14831 + * Real beginning of normal "text" segment
14832 + */
14833 +ENTRY(stext)
14834 +ENTRY(_stext)
14835 +
14836 +/*
14837 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14838 * %esi points to the real-mode code as a 32-bit pointer.
14839 * CS and DS must be 4 GB flat segments, but we don't depend on
14840 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14841 * can.
14842 */
14843 __HEAD
14844 +
14845 +#ifdef CONFIG_PAX_KERNEXEC
14846 + jmp startup_32
14847 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14848 +.fill PAGE_SIZE-5,1,0xcc
14849 +#endif
14850 +
14851 ENTRY(startup_32)
14852 movl pa(stack_start),%ecx
14853
14854 @@ -105,6 +120,57 @@ ENTRY(startup_32)
14855 2:
14856 leal -__PAGE_OFFSET(%ecx),%esp
14857
14858 +#ifdef CONFIG_SMP
14859 + movl $pa(cpu_gdt_table),%edi
14860 + movl $__per_cpu_load,%eax
14861 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14862 + rorl $16,%eax
14863 + movb %al,__KERNEL_PERCPU + 4(%edi)
14864 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14865 + movl $__per_cpu_end - 1,%eax
14866 + subl $__per_cpu_start,%eax
14867 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14868 +#endif
14869 +
14870 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14871 + movl $NR_CPUS,%ecx
14872 + movl $pa(cpu_gdt_table),%edi
14873 +1:
14874 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14875 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14876 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14877 + addl $PAGE_SIZE_asm,%edi
14878 + loop 1b
14879 +#endif
14880 +
14881 +#ifdef CONFIG_PAX_KERNEXEC
14882 + movl $pa(boot_gdt),%edi
14883 + movl $__LOAD_PHYSICAL_ADDR,%eax
14884 + movw %ax,__BOOT_CS + 2(%edi)
14885 + rorl $16,%eax
14886 + movb %al,__BOOT_CS + 4(%edi)
14887 + movb %ah,__BOOT_CS + 7(%edi)
14888 + rorl $16,%eax
14889 +
14890 + ljmp $(__BOOT_CS),$1f
14891 +1:
14892 +
14893 + movl $NR_CPUS,%ecx
14894 + movl $pa(cpu_gdt_table),%edi
14895 + addl $__PAGE_OFFSET,%eax
14896 +1:
14897 + movw %ax,__KERNEL_CS + 2(%edi)
14898 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14899 + rorl $16,%eax
14900 + movb %al,__KERNEL_CS + 4(%edi)
14901 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14902 + movb %ah,__KERNEL_CS + 7(%edi)
14903 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14904 + rorl $16,%eax
14905 + addl $PAGE_SIZE_asm,%edi
14906 + loop 1b
14907 +#endif
14908 +
14909 /*
14910 * Clear BSS first so that there are no surprises...
14911 */
14912 @@ -195,8 +261,11 @@ ENTRY(startup_32)
14913 movl %eax, pa(max_pfn_mapped)
14914
14915 /* Do early initialization of the fixmap area */
14916 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14917 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14918 +#ifdef CONFIG_COMPAT_VDSO
14919 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14920 +#else
14921 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14922 +#endif
14923 #else /* Not PAE */
14924
14925 page_pde_offset = (__PAGE_OFFSET >> 20);
14926 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14927 movl %eax, pa(max_pfn_mapped)
14928
14929 /* Do early initialization of the fixmap area */
14930 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14931 - movl %eax,pa(initial_page_table+0xffc)
14932 +#ifdef CONFIG_COMPAT_VDSO
14933 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14934 +#else
14935 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14936 +#endif
14937 #endif
14938
14939 #ifdef CONFIG_PARAVIRT
14940 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14941 cmpl $num_subarch_entries, %eax
14942 jae bad_subarch
14943
14944 - movl pa(subarch_entries)(,%eax,4), %eax
14945 - subl $__PAGE_OFFSET, %eax
14946 - jmp *%eax
14947 + jmp *pa(subarch_entries)(,%eax,4)
14948
14949 bad_subarch:
14950 WEAK(lguest_entry)
14951 @@ -255,10 +325,10 @@ WEAK(xen_entry)
14952 __INITDATA
14953
14954 subarch_entries:
14955 - .long default_entry /* normal x86/PC */
14956 - .long lguest_entry /* lguest hypervisor */
14957 - .long xen_entry /* Xen hypervisor */
14958 - .long default_entry /* Moorestown MID */
14959 + .long ta(default_entry) /* normal x86/PC */
14960 + .long ta(lguest_entry) /* lguest hypervisor */
14961 + .long ta(xen_entry) /* Xen hypervisor */
14962 + .long ta(default_entry) /* Moorestown MID */
14963 num_subarch_entries = (. - subarch_entries) / 4
14964 .previous
14965 #else
14966 @@ -312,6 +382,7 @@ default_entry:
14967 orl %edx,%eax
14968 movl %eax,%cr4
14969
14970 +#ifdef CONFIG_X86_PAE
14971 testb $X86_CR4_PAE, %al # check if PAE is enabled
14972 jz 6f
14973
14974 @@ -340,6 +411,9 @@ default_entry:
14975 /* Make changes effective */
14976 wrmsr
14977
14978 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14979 +#endif
14980 +
14981 6:
14982
14983 /*
14984 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14985 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14986 movl %eax,%ss # after changing gdt.
14987
14988 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14989 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14990 movl %eax,%ds
14991 movl %eax,%es
14992
14993 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
14994 */
14995 cmpb $0,ready
14996 jne 1f
14997 - movl $gdt_page,%eax
14998 + movl $cpu_gdt_table,%eax
14999 movl $stack_canary,%ecx
15000 +#ifdef CONFIG_SMP
15001 + addl $__per_cpu_load,%ecx
15002 +#endif
15003 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
15004 shrl $16, %ecx
15005 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
15006 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
15007 1:
15008 -#endif
15009 movl $(__KERNEL_STACK_CANARY),%eax
15010 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15011 + movl $(__USER_DS),%eax
15012 +#else
15013 + xorl %eax,%eax
15014 +#endif
15015 movl %eax,%gs
15016
15017 xorl %eax,%eax # Clear LDT
15018 @@ -558,22 +639,22 @@ early_page_fault:
15019 jmp early_fault
15020
15021 early_fault:
15022 - cld
15023 #ifdef CONFIG_PRINTK
15024 + cmpl $1,%ss:early_recursion_flag
15025 + je hlt_loop
15026 + incl %ss:early_recursion_flag
15027 + cld
15028 pusha
15029 movl $(__KERNEL_DS),%eax
15030 movl %eax,%ds
15031 movl %eax,%es
15032 - cmpl $2,early_recursion_flag
15033 - je hlt_loop
15034 - incl early_recursion_flag
15035 movl %cr2,%eax
15036 pushl %eax
15037 pushl %edx /* trapno */
15038 pushl $fault_msg
15039 call printk
15040 +; call dump_stack
15041 #endif
15042 - call dump_stack
15043 hlt_loop:
15044 hlt
15045 jmp hlt_loop
15046 @@ -581,8 +662,11 @@ hlt_loop:
15047 /* This is the default interrupt "handler" :-) */
15048 ALIGN
15049 ignore_int:
15050 - cld
15051 #ifdef CONFIG_PRINTK
15052 + cmpl $2,%ss:early_recursion_flag
15053 + je hlt_loop
15054 + incl %ss:early_recursion_flag
15055 + cld
15056 pushl %eax
15057 pushl %ecx
15058 pushl %edx
15059 @@ -591,9 +675,6 @@ ignore_int:
15060 movl $(__KERNEL_DS),%eax
15061 movl %eax,%ds
15062 movl %eax,%es
15063 - cmpl $2,early_recursion_flag
15064 - je hlt_loop
15065 - incl early_recursion_flag
15066 pushl 16(%esp)
15067 pushl 24(%esp)
15068 pushl 32(%esp)
15069 @@ -622,29 +703,43 @@ ENTRY(initial_code)
15070 /*
15071 * BSS section
15072 */
15073 -__PAGE_ALIGNED_BSS
15074 - .align PAGE_SIZE
15075 #ifdef CONFIG_X86_PAE
15076 +.section .initial_pg_pmd,"a",@progbits
15077 initial_pg_pmd:
15078 .fill 1024*KPMDS,4,0
15079 #else
15080 +.section .initial_page_table,"a",@progbits
15081 ENTRY(initial_page_table)
15082 .fill 1024,4,0
15083 #endif
15084 +.section .initial_pg_fixmap,"a",@progbits
15085 initial_pg_fixmap:
15086 .fill 1024,4,0
15087 +.section .empty_zero_page,"a",@progbits
15088 ENTRY(empty_zero_page)
15089 .fill 4096,1,0
15090 +.section .swapper_pg_dir,"a",@progbits
15091 ENTRY(swapper_pg_dir)
15092 +#ifdef CONFIG_X86_PAE
15093 + .fill 4,8,0
15094 +#else
15095 .fill 1024,4,0
15096 +#endif
15097 +
15098 +/*
15099 + * The IDT has to be page-aligned to simplify the Pentium
15100 + * F0 0F bug workaround.. We have a special link segment
15101 + * for this.
15102 + */
15103 +.section .idt,"a",@progbits
15104 +ENTRY(idt_table)
15105 + .fill 256,8,0
15106
15107 /*
15108 * This starts the data section.
15109 */
15110 #ifdef CONFIG_X86_PAE
15111 -__PAGE_ALIGNED_DATA
15112 - /* Page-aligned for the benefit of paravirt? */
15113 - .align PAGE_SIZE
15114 +.section .initial_page_table,"a",@progbits
15115 ENTRY(initial_page_table)
15116 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
15117 # if KPMDS == 3
15118 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
15119 # error "Kernel PMDs should be 1, 2 or 3"
15120 # endif
15121 .align PAGE_SIZE /* needs to be page-sized too */
15122 +
15123 +#ifdef CONFIG_PAX_PER_CPU_PGD
15124 +ENTRY(cpu_pgd)
15125 + .rept NR_CPUS
15126 + .fill 4,8,0
15127 + .endr
15128 +#endif
15129 +
15130 #endif
15131
15132 .data
15133 .balign 4
15134 ENTRY(stack_start)
15135 - .long init_thread_union+THREAD_SIZE
15136 + .long init_thread_union+THREAD_SIZE-8
15137
15138 +ready: .byte 0
15139 +
15140 +.section .rodata,"a",@progbits
15141 early_recursion_flag:
15142 .long 0
15143
15144 -ready: .byte 0
15145 -
15146 int_msg:
15147 .asciz "Unknown interrupt or fault at: %p %p %p\n"
15148
15149 @@ -707,7 +811,7 @@ fault_msg:
15150 .word 0 # 32 bit align gdt_desc.address
15151 boot_gdt_descr:
15152 .word __BOOT_DS+7
15153 - .long boot_gdt - __PAGE_OFFSET
15154 + .long pa(boot_gdt)
15155
15156 .word 0 # 32-bit align idt_desc.address
15157 idt_descr:
15158 @@ -718,7 +822,7 @@ idt_descr:
15159 .word 0 # 32 bit align gdt_desc.address
15160 ENTRY(early_gdt_descr)
15161 .word GDT_ENTRIES*8-1
15162 - .long gdt_page /* Overwritten for secondary CPUs */
15163 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
15164
15165 /*
15166 * The boot_gdt must mirror the equivalent in setup.S and is
15167 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
15168 .align L1_CACHE_BYTES
15169 ENTRY(boot_gdt)
15170 .fill GDT_ENTRY_BOOT_CS,8,0
15171 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
15172 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
15173 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
15174 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
15175 +
15176 + .align PAGE_SIZE_asm
15177 +ENTRY(cpu_gdt_table)
15178 + .rept NR_CPUS
15179 + .quad 0x0000000000000000 /* NULL descriptor */
15180 + .quad 0x0000000000000000 /* 0x0b reserved */
15181 + .quad 0x0000000000000000 /* 0x13 reserved */
15182 + .quad 0x0000000000000000 /* 0x1b reserved */
15183 +
15184 +#ifdef CONFIG_PAX_KERNEXEC
15185 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
15186 +#else
15187 + .quad 0x0000000000000000 /* 0x20 unused */
15188 +#endif
15189 +
15190 + .quad 0x0000000000000000 /* 0x28 unused */
15191 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
15192 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
15193 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
15194 + .quad 0x0000000000000000 /* 0x4b reserved */
15195 + .quad 0x0000000000000000 /* 0x53 reserved */
15196 + .quad 0x0000000000000000 /* 0x5b reserved */
15197 +
15198 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
15199 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
15200 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
15201 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
15202 +
15203 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
15204 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
15205 +
15206 + /*
15207 + * Segments used for calling PnP BIOS have byte granularity.
15208 + * The code segments and data segments have fixed 64k limits,
15209 + * the transfer segment sizes are set at run time.
15210 + */
15211 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
15212 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
15213 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
15214 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
15215 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
15216 +
15217 + /*
15218 + * The APM segments have byte granularity and their bases
15219 + * are set at run time. All have 64k limits.
15220 + */
15221 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
15222 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
15223 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
15224 +
15225 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
15226 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
15227 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
15228 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
15229 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
15230 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
15231 +
15232 + /* Be sure this is zeroed to avoid false validations in Xen */
15233 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
15234 + .endr
15235 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
15236 index e11e394..9aebc5d 100644
15237 --- a/arch/x86/kernel/head_64.S
15238 +++ b/arch/x86/kernel/head_64.S
15239 @@ -19,6 +19,8 @@
15240 #include <asm/cache.h>
15241 #include <asm/processor-flags.h>
15242 #include <asm/percpu.h>
15243 +#include <asm/cpufeature.h>
15244 +#include <asm/alternative-asm.h>
15245
15246 #ifdef CONFIG_PARAVIRT
15247 #include <asm/asm-offsets.h>
15248 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
15249 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
15250 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
15251 L3_START_KERNEL = pud_index(__START_KERNEL_map)
15252 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
15253 +L3_VMALLOC_START = pud_index(VMALLOC_START)
15254 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
15255 +L3_VMALLOC_END = pud_index(VMALLOC_END)
15256 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
15257 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
15258
15259 .text
15260 __HEAD
15261 @@ -85,35 +93,23 @@ startup_64:
15262 */
15263 addq %rbp, init_level4_pgt + 0(%rip)
15264 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
15265 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
15266 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
15267 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
15268 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
15269
15270 addq %rbp, level3_ident_pgt + 0(%rip)
15271 +#ifndef CONFIG_XEN
15272 + addq %rbp, level3_ident_pgt + 8(%rip)
15273 +#endif
15274
15275 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
15276 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
15277 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
15278 +
15279 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
15280 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
15281
15282 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15283 -
15284 - /* Add an Identity mapping if I am above 1G */
15285 - leaq _text(%rip), %rdi
15286 - andq $PMD_PAGE_MASK, %rdi
15287 -
15288 - movq %rdi, %rax
15289 - shrq $PUD_SHIFT, %rax
15290 - andq $(PTRS_PER_PUD - 1), %rax
15291 - jz ident_complete
15292 -
15293 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15294 - leaq level3_ident_pgt(%rip), %rbx
15295 - movq %rdx, 0(%rbx, %rax, 8)
15296 -
15297 - movq %rdi, %rax
15298 - shrq $PMD_SHIFT, %rax
15299 - andq $(PTRS_PER_PMD - 1), %rax
15300 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15301 - leaq level2_spare_pgt(%rip), %rbx
15302 - movq %rdx, 0(%rbx, %rax, 8)
15303 -ident_complete:
15304 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15305
15306 /*
15307 * Fixup the kernel text+data virtual addresses. Note that
15308 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
15309 * after the boot processor executes this code.
15310 */
15311
15312 - /* Enable PAE mode and PGE */
15313 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15314 + /* Enable PAE mode and PSE/PGE */
15315 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15316 movq %rax, %cr4
15317
15318 /* Setup early boot stage 4 level pagetables. */
15319 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
15320 movl $MSR_EFER, %ecx
15321 rdmsr
15322 btsl $_EFER_SCE, %eax /* Enable System Call */
15323 - btl $20,%edi /* No Execute supported? */
15324 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15325 jnc 1f
15326 btsl $_EFER_NX, %eax
15327 + leaq init_level4_pgt(%rip), %rdi
15328 +#ifndef CONFIG_EFI
15329 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15330 +#endif
15331 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15332 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15333 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15334 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15335 1: wrmsr /* Make changes effective */
15336
15337 /* Setup cr0 */
15338 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15339 * jump. In addition we need to ensure %cs is set so we make this
15340 * a far return.
15341 */
15342 + pax_set_fptr_mask
15343 movq initial_code(%rip),%rax
15344 pushq $0 # fake return address to stop unwinder
15345 pushq $__KERNEL_CS # set correct cs
15346 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15347 bad_address:
15348 jmp bad_address
15349
15350 - .section ".init.text","ax"
15351 + __INIT
15352 #ifdef CONFIG_EARLY_PRINTK
15353 .globl early_idt_handlers
15354 early_idt_handlers:
15355 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15356 #endif /* EARLY_PRINTK */
15357 1: hlt
15358 jmp 1b
15359 + .previous
15360
15361 #ifdef CONFIG_EARLY_PRINTK
15362 + __INITDATA
15363 early_recursion_flag:
15364 .long 0
15365 + .previous
15366
15367 + .section .rodata,"a",@progbits
15368 early_idt_msg:
15369 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15370 early_idt_ripmsg:
15371 .asciz "RIP %s\n"
15372 + .previous
15373 #endif /* CONFIG_EARLY_PRINTK */
15374 - .previous
15375
15376 + .section .rodata,"a",@progbits
15377 #define NEXT_PAGE(name) \
15378 .balign PAGE_SIZE; \
15379 ENTRY(name)
15380 @@ -338,7 +348,6 @@ ENTRY(name)
15381 i = i + 1 ; \
15382 .endr
15383
15384 - .data
15385 /*
15386 * This default setting generates an ident mapping at address 0x100000
15387 * and a mapping for the kernel that precisely maps virtual address
15388 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15389 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15390 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15391 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15392 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
15393 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15394 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
15395 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15396 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15397 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15398 .org init_level4_pgt + L4_START_KERNEL*8, 0
15399 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15400 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15401
15402 +#ifdef CONFIG_PAX_PER_CPU_PGD
15403 +NEXT_PAGE(cpu_pgd)
15404 + .rept NR_CPUS
15405 + .fill 512,8,0
15406 + .endr
15407 +#endif
15408 +
15409 NEXT_PAGE(level3_ident_pgt)
15410 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15411 +#ifdef CONFIG_XEN
15412 .fill 511,8,0
15413 +#else
15414 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15415 + .fill 510,8,0
15416 +#endif
15417 +
15418 +NEXT_PAGE(level3_vmalloc_start_pgt)
15419 + .fill 512,8,0
15420 +
15421 +NEXT_PAGE(level3_vmalloc_end_pgt)
15422 + .fill 512,8,0
15423 +
15424 +NEXT_PAGE(level3_vmemmap_pgt)
15425 + .fill L3_VMEMMAP_START,8,0
15426 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15427
15428 NEXT_PAGE(level3_kernel_pgt)
15429 .fill L3_START_KERNEL,8,0
15430 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15431 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15432 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15433
15434 +NEXT_PAGE(level2_vmemmap_pgt)
15435 + .fill 512,8,0
15436 +
15437 NEXT_PAGE(level2_fixmap_pgt)
15438 - .fill 506,8,0
15439 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15440 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15441 - .fill 5,8,0
15442 + .fill 507,8,0
15443 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15444 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15445 + .fill 4,8,0
15446
15447 -NEXT_PAGE(level1_fixmap_pgt)
15448 +NEXT_PAGE(level1_vsyscall_pgt)
15449 .fill 512,8,0
15450
15451 -NEXT_PAGE(level2_ident_pgt)
15452 - /* Since I easily can, map the first 1G.
15453 + /* Since I easily can, map the first 2G.
15454 * Don't set NX because code runs from these pages.
15455 */
15456 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15457 +NEXT_PAGE(level2_ident_pgt)
15458 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15459
15460 NEXT_PAGE(level2_kernel_pgt)
15461 /*
15462 @@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15463 * If you want to increase this then increase MODULES_VADDR
15464 * too.)
15465 */
15466 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15467 - KERNEL_IMAGE_SIZE/PMD_SIZE)
15468 -
15469 -NEXT_PAGE(level2_spare_pgt)
15470 - .fill 512, 8, 0
15471 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15472
15473 #undef PMDS
15474 #undef NEXT_PAGE
15475
15476 - .data
15477 + .align PAGE_SIZE
15478 +ENTRY(cpu_gdt_table)
15479 + .rept NR_CPUS
15480 + .quad 0x0000000000000000 /* NULL descriptor */
15481 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15482 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
15483 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
15484 + .quad 0x00cffb000000ffff /* __USER32_CS */
15485 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15486 + .quad 0x00affb000000ffff /* __USER_CS */
15487 +
15488 +#ifdef CONFIG_PAX_KERNEXEC
15489 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15490 +#else
15491 + .quad 0x0 /* unused */
15492 +#endif
15493 +
15494 + .quad 0,0 /* TSS */
15495 + .quad 0,0 /* LDT */
15496 + .quad 0,0,0 /* three TLS descriptors */
15497 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
15498 + /* asm/segment.h:GDT_ENTRIES must match this */
15499 +
15500 + /* zero the remaining page */
15501 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15502 + .endr
15503 +
15504 .align 16
15505 .globl early_gdt_descr
15506 early_gdt_descr:
15507 .word GDT_ENTRIES*8-1
15508 early_gdt_descr_base:
15509 - .quad INIT_PER_CPU_VAR(gdt_page)
15510 + .quad cpu_gdt_table
15511
15512 ENTRY(phys_base)
15513 /* This must match the first entry in level2_kernel_pgt */
15514 .quad 0x0000000000000000
15515
15516 #include "../../x86/xen/xen-head.S"
15517 -
15518 - .section .bss, "aw", @nobits
15519 +
15520 + .section .rodata,"a",@progbits
15521 .align L1_CACHE_BYTES
15522 ENTRY(idt_table)
15523 - .skip IDT_ENTRIES * 16
15524 + .fill 512,8,0
15525
15526 __PAGE_ALIGNED_BSS
15527 .align PAGE_SIZE
15528 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15529 index 9c3bd4a..e1d9b35 100644
15530 --- a/arch/x86/kernel/i386_ksyms_32.c
15531 +++ b/arch/x86/kernel/i386_ksyms_32.c
15532 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15533 EXPORT_SYMBOL(cmpxchg8b_emu);
15534 #endif
15535
15536 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15537 +
15538 /* Networking helper routines. */
15539 EXPORT_SYMBOL(csum_partial_copy_generic);
15540 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15541 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15542
15543 EXPORT_SYMBOL(__get_user_1);
15544 EXPORT_SYMBOL(__get_user_2);
15545 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15546
15547 EXPORT_SYMBOL(csum_partial);
15548 EXPORT_SYMBOL(empty_zero_page);
15549 +
15550 +#ifdef CONFIG_PAX_KERNEXEC
15551 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15552 +#endif
15553 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15554 index 6104852..6114160 100644
15555 --- a/arch/x86/kernel/i8259.c
15556 +++ b/arch/x86/kernel/i8259.c
15557 @@ -210,7 +210,7 @@ spurious_8259A_irq:
15558 "spurious 8259A interrupt: IRQ%d.\n", irq);
15559 spurious_irq_mask |= irqmask;
15560 }
15561 - atomic_inc(&irq_err_count);
15562 + atomic_inc_unchecked(&irq_err_count);
15563 /*
15564 * Theoretically we do not have to handle this IRQ,
15565 * but in Linux this does not cause problems and is
15566 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15567 index 43e9ccf..44ccf6f 100644
15568 --- a/arch/x86/kernel/init_task.c
15569 +++ b/arch/x86/kernel/init_task.c
15570 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15571 * way process stacks are handled. This is done by having a special
15572 * "init_task" linker map entry..
15573 */
15574 -union thread_union init_thread_union __init_task_data =
15575 - { INIT_THREAD_INFO(init_task) };
15576 +union thread_union init_thread_union __init_task_data;
15577
15578 /*
15579 * Initial task structure.
15580 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15581 * section. Since TSS's are completely CPU-local, we want them
15582 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15583 */
15584 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15585 -
15586 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15587 +EXPORT_SYMBOL(init_tss);
15588 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15589 index 8c96897..be66bfa 100644
15590 --- a/arch/x86/kernel/ioport.c
15591 +++ b/arch/x86/kernel/ioport.c
15592 @@ -6,6 +6,7 @@
15593 #include <linux/sched.h>
15594 #include <linux/kernel.h>
15595 #include <linux/capability.h>
15596 +#include <linux/security.h>
15597 #include <linux/errno.h>
15598 #include <linux/types.h>
15599 #include <linux/ioport.h>
15600 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15601
15602 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15603 return -EINVAL;
15604 +#ifdef CONFIG_GRKERNSEC_IO
15605 + if (turn_on && grsec_disable_privio) {
15606 + gr_handle_ioperm();
15607 + return -EPERM;
15608 + }
15609 +#endif
15610 if (turn_on && !capable(CAP_SYS_RAWIO))
15611 return -EPERM;
15612
15613 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15614 * because the ->io_bitmap_max value must match the bitmap
15615 * contents:
15616 */
15617 - tss = &per_cpu(init_tss, get_cpu());
15618 + tss = init_tss + get_cpu();
15619
15620 if (turn_on)
15621 bitmap_clear(t->io_bitmap_ptr, from, num);
15622 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15623 return -EINVAL;
15624 /* Trying to gain more privileges? */
15625 if (level > old) {
15626 +#ifdef CONFIG_GRKERNSEC_IO
15627 + if (grsec_disable_privio) {
15628 + gr_handle_iopl();
15629 + return -EPERM;
15630 + }
15631 +#endif
15632 if (!capable(CAP_SYS_RAWIO))
15633 return -EPERM;
15634 }
15635 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15636 index 429e0c9..17b3ece 100644
15637 --- a/arch/x86/kernel/irq.c
15638 +++ b/arch/x86/kernel/irq.c
15639 @@ -18,7 +18,7 @@
15640 #include <asm/mce.h>
15641 #include <asm/hw_irq.h>
15642
15643 -atomic_t irq_err_count;
15644 +atomic_unchecked_t irq_err_count;
15645
15646 /* Function pointer for generic interrupt vector handling */
15647 void (*x86_platform_ipi_callback)(void) = NULL;
15648 @@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15649 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15650 seq_printf(p, " Machine check polls\n");
15651 #endif
15652 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15653 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15654 #if defined(CONFIG_X86_IO_APIC)
15655 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15656 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15657 #endif
15658 return 0;
15659 }
15660 @@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15661
15662 u64 arch_irq_stat(void)
15663 {
15664 - u64 sum = atomic_read(&irq_err_count);
15665 + u64 sum = atomic_read_unchecked(&irq_err_count);
15666
15667 #ifdef CONFIG_X86_IO_APIC
15668 - sum += atomic_read(&irq_mis_count);
15669 + sum += atomic_read_unchecked(&irq_mis_count);
15670 #endif
15671 return sum;
15672 }
15673 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15674 index 7209070..cbcd71a 100644
15675 --- a/arch/x86/kernel/irq_32.c
15676 +++ b/arch/x86/kernel/irq_32.c
15677 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15678 __asm__ __volatile__("andl %%esp,%0" :
15679 "=r" (sp) : "0" (THREAD_SIZE - 1));
15680
15681 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15682 + return sp < STACK_WARN;
15683 }
15684
15685 static void print_stack_overflow(void)
15686 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15687 * per-CPU IRQ handling contexts (thread information and stack)
15688 */
15689 union irq_ctx {
15690 - struct thread_info tinfo;
15691 - u32 stack[THREAD_SIZE/sizeof(u32)];
15692 + unsigned long previous_esp;
15693 + u32 stack[THREAD_SIZE/sizeof(u32)];
15694 } __attribute__((aligned(THREAD_SIZE)));
15695
15696 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15697 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15698 static inline int
15699 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15700 {
15701 - union irq_ctx *curctx, *irqctx;
15702 + union irq_ctx *irqctx;
15703 u32 *isp, arg1, arg2;
15704
15705 - curctx = (union irq_ctx *) current_thread_info();
15706 irqctx = __this_cpu_read(hardirq_ctx);
15707
15708 /*
15709 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15710 * handler) we can't do that and just have to keep using the
15711 * current stack (which is the irq stack already after all)
15712 */
15713 - if (unlikely(curctx == irqctx))
15714 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15715 return 0;
15716
15717 /* build the stack frame on the IRQ stack */
15718 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15719 - irqctx->tinfo.task = curctx->tinfo.task;
15720 - irqctx->tinfo.previous_esp = current_stack_pointer;
15721 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15722 + irqctx->previous_esp = current_stack_pointer;
15723
15724 - /*
15725 - * Copy the softirq bits in preempt_count so that the
15726 - * softirq checks work in the hardirq context.
15727 - */
15728 - irqctx->tinfo.preempt_count =
15729 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15730 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15731 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15732 + __set_fs(MAKE_MM_SEG(0));
15733 +#endif
15734
15735 if (unlikely(overflow))
15736 call_on_stack(print_stack_overflow, isp);
15737 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15738 : "0" (irq), "1" (desc), "2" (isp),
15739 "D" (desc->handle_irq)
15740 : "memory", "cc", "ecx");
15741 +
15742 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15743 + __set_fs(current_thread_info()->addr_limit);
15744 +#endif
15745 +
15746 return 1;
15747 }
15748
15749 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15750 */
15751 void __cpuinit irq_ctx_init(int cpu)
15752 {
15753 - union irq_ctx *irqctx;
15754 -
15755 if (per_cpu(hardirq_ctx, cpu))
15756 return;
15757
15758 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15759 - THREAD_FLAGS,
15760 - THREAD_ORDER));
15761 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15762 - irqctx->tinfo.cpu = cpu;
15763 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15764 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15765 -
15766 - per_cpu(hardirq_ctx, cpu) = irqctx;
15767 -
15768 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15769 - THREAD_FLAGS,
15770 - THREAD_ORDER));
15771 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15772 - irqctx->tinfo.cpu = cpu;
15773 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15774 -
15775 - per_cpu(softirq_ctx, cpu) = irqctx;
15776 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15777 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15778
15779 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15780 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15781 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15782 asmlinkage void do_softirq(void)
15783 {
15784 unsigned long flags;
15785 - struct thread_info *curctx;
15786 union irq_ctx *irqctx;
15787 u32 *isp;
15788
15789 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15790 local_irq_save(flags);
15791
15792 if (local_softirq_pending()) {
15793 - curctx = current_thread_info();
15794 irqctx = __this_cpu_read(softirq_ctx);
15795 - irqctx->tinfo.task = curctx->task;
15796 - irqctx->tinfo.previous_esp = current_stack_pointer;
15797 + irqctx->previous_esp = current_stack_pointer;
15798
15799 /* build the stack frame on the softirq stack */
15800 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15801 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15802 +
15803 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15804 + __set_fs(MAKE_MM_SEG(0));
15805 +#endif
15806
15807 call_on_stack(__do_softirq, isp);
15808 +
15809 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15810 + __set_fs(current_thread_info()->addr_limit);
15811 +#endif
15812 +
15813 /*
15814 * Shouldn't happen, we returned above if in_interrupt():
15815 */
15816 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
15817 index 69bca46..0bac999 100644
15818 --- a/arch/x86/kernel/irq_64.c
15819 +++ b/arch/x86/kernel/irq_64.c
15820 @@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
15821 #ifdef CONFIG_DEBUG_STACKOVERFLOW
15822 u64 curbase = (u64)task_stack_page(current);
15823
15824 - if (user_mode_vm(regs))
15825 + if (user_mode(regs))
15826 return;
15827
15828 WARN_ONCE(regs->sp >= curbase &&
15829 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15830 index faba577..93b9e71 100644
15831 --- a/arch/x86/kernel/kgdb.c
15832 +++ b/arch/x86/kernel/kgdb.c
15833 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15834 #ifdef CONFIG_X86_32
15835 switch (regno) {
15836 case GDB_SS:
15837 - if (!user_mode_vm(regs))
15838 + if (!user_mode(regs))
15839 *(unsigned long *)mem = __KERNEL_DS;
15840 break;
15841 case GDB_SP:
15842 - if (!user_mode_vm(regs))
15843 + if (!user_mode(regs))
15844 *(unsigned long *)mem = kernel_stack_pointer(regs);
15845 break;
15846 case GDB_GS:
15847 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15848 case 'k':
15849 /* clear the trace bit */
15850 linux_regs->flags &= ~X86_EFLAGS_TF;
15851 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15852 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15853
15854 /* set the trace bit if we're stepping */
15855 if (remcomInBuffer[0] == 's') {
15856 linux_regs->flags |= X86_EFLAGS_TF;
15857 - atomic_set(&kgdb_cpu_doing_single_step,
15858 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15859 raw_smp_processor_id());
15860 }
15861
15862 @@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15863
15864 switch (cmd) {
15865 case DIE_DEBUG:
15866 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15867 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15868 if (user_mode(regs))
15869 return single_step_cont(regs, args);
15870 break;
15871 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15872 index 7da647d..56fe348 100644
15873 --- a/arch/x86/kernel/kprobes.c
15874 +++ b/arch/x86/kernel/kprobes.c
15875 @@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15876 } __attribute__((packed)) *insn;
15877
15878 insn = (struct __arch_relative_insn *)from;
15879 +
15880 + pax_open_kernel();
15881 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15882 insn->op = op;
15883 + pax_close_kernel();
15884 }
15885
15886 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15887 @@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15888 kprobe_opcode_t opcode;
15889 kprobe_opcode_t *orig_opcodes = opcodes;
15890
15891 - if (search_exception_tables((unsigned long)opcodes))
15892 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15893 return 0; /* Page fault may occur on this address. */
15894
15895 retry:
15896 @@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15897 }
15898 }
15899 insn_get_length(&insn);
15900 + pax_open_kernel();
15901 memcpy(dest, insn.kaddr, insn.length);
15902 + pax_close_kernel();
15903
15904 #ifdef CONFIG_X86_64
15905 if (insn_rip_relative(&insn)) {
15906 @@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15907 (u8 *) dest;
15908 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15909 disp = (u8 *) dest + insn_offset_displacement(&insn);
15910 + pax_open_kernel();
15911 *(s32 *) disp = (s32) newdisp;
15912 + pax_close_kernel();
15913 }
15914 #endif
15915 return insn.length;
15916 @@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15917 */
15918 __copy_instruction(p->ainsn.insn, p->addr, 0);
15919
15920 - if (can_boost(p->addr))
15921 + if (can_boost(ktla_ktva(p->addr)))
15922 p->ainsn.boostable = 0;
15923 else
15924 p->ainsn.boostable = -1;
15925
15926 - p->opcode = *p->addr;
15927 + p->opcode = *(ktla_ktva(p->addr));
15928 }
15929
15930 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15931 @@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15932 * nor set current_kprobe, because it doesn't use single
15933 * stepping.
15934 */
15935 - regs->ip = (unsigned long)p->ainsn.insn;
15936 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15937 preempt_enable_no_resched();
15938 return;
15939 }
15940 @@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15941 if (p->opcode == BREAKPOINT_INSTRUCTION)
15942 regs->ip = (unsigned long)p->addr;
15943 else
15944 - regs->ip = (unsigned long)p->ainsn.insn;
15945 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15946 }
15947
15948 /*
15949 @@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15950 setup_singlestep(p, regs, kcb, 0);
15951 return 1;
15952 }
15953 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
15954 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15955 /*
15956 * The breakpoint instruction was removed right
15957 * after we hit it. Another cpu has removed
15958 @@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15959 " movq %rax, 152(%rsp)\n"
15960 RESTORE_REGS_STRING
15961 " popfq\n"
15962 +#ifdef KERNEXEC_PLUGIN
15963 + " btsq $63,(%rsp)\n"
15964 +#endif
15965 #else
15966 " pushf\n"
15967 SAVE_REGS_STRING
15968 @@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15969 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15970 {
15971 unsigned long *tos = stack_addr(regs);
15972 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15973 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15974 unsigned long orig_ip = (unsigned long)p->addr;
15975 kprobe_opcode_t *insn = p->ainsn.insn;
15976
15977 @@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15978 struct die_args *args = data;
15979 int ret = NOTIFY_DONE;
15980
15981 - if (args->regs && user_mode_vm(args->regs))
15982 + if (args->regs && user_mode(args->regs))
15983 return ret;
15984
15985 switch (val) {
15986 @@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15987 * Verify if the address gap is in 2GB range, because this uses
15988 * a relative jump.
15989 */
15990 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15991 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15992 if (abs(rel) > 0x7fffffff)
15993 return -ERANGE;
15994
15995 @@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15996 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15997
15998 /* Set probe function call */
15999 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
16000 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
16001
16002 /* Set returning jmp instruction at the tail of out-of-line buffer */
16003 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
16004 - (u8 *)op->kp.addr + op->optinsn.size);
16005 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
16006
16007 flush_icache_range((unsigned long) buf,
16008 (unsigned long) buf + TMPL_END_IDX +
16009 @@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
16010 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
16011
16012 /* Backup instructions which will be replaced by jump address */
16013 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
16014 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
16015 RELATIVE_ADDR_SIZE);
16016
16017 insn_buf[0] = RELATIVEJUMP_OPCODE;
16018 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
16019 index a9c2116..a52d4fc 100644
16020 --- a/arch/x86/kernel/kvm.c
16021 +++ b/arch/x86/kernel/kvm.c
16022 @@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
16023 pv_mmu_ops.set_pud = kvm_set_pud;
16024 #if PAGETABLE_LEVELS == 4
16025 pv_mmu_ops.set_pgd = kvm_set_pgd;
16026 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
16027 #endif
16028 #endif
16029 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
16030 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
16031 index ea69726..604d066 100644
16032 --- a/arch/x86/kernel/ldt.c
16033 +++ b/arch/x86/kernel/ldt.c
16034 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
16035 if (reload) {
16036 #ifdef CONFIG_SMP
16037 preempt_disable();
16038 - load_LDT(pc);
16039 + load_LDT_nolock(pc);
16040 if (!cpumask_equal(mm_cpumask(current->mm),
16041 cpumask_of(smp_processor_id())))
16042 smp_call_function(flush_ldt, current->mm, 1);
16043 preempt_enable();
16044 #else
16045 - load_LDT(pc);
16046 + load_LDT_nolock(pc);
16047 #endif
16048 }
16049 if (oldsize) {
16050 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
16051 return err;
16052
16053 for (i = 0; i < old->size; i++)
16054 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
16055 + write_ldt_entry(new->ldt, i, old->ldt + i);
16056 return 0;
16057 }
16058
16059 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
16060 retval = copy_ldt(&mm->context, &old_mm->context);
16061 mutex_unlock(&old_mm->context.lock);
16062 }
16063 +
16064 + if (tsk == current) {
16065 + mm->context.vdso = 0;
16066 +
16067 +#ifdef CONFIG_X86_32
16068 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
16069 + mm->context.user_cs_base = 0UL;
16070 + mm->context.user_cs_limit = ~0UL;
16071 +
16072 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16073 + cpus_clear(mm->context.cpu_user_cs_mask);
16074 +#endif
16075 +
16076 +#endif
16077 +#endif
16078 +
16079 + }
16080 +
16081 return retval;
16082 }
16083
16084 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
16085 }
16086 }
16087
16088 +#ifdef CONFIG_PAX_SEGMEXEC
16089 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
16090 + error = -EINVAL;
16091 + goto out_unlock;
16092 + }
16093 +#endif
16094 +
16095 fill_ldt(&ldt, &ldt_info);
16096 if (oldmode)
16097 ldt.avl = 0;
16098 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
16099 index a3fa43b..8966f4c 100644
16100 --- a/arch/x86/kernel/machine_kexec_32.c
16101 +++ b/arch/x86/kernel/machine_kexec_32.c
16102 @@ -27,7 +27,7 @@
16103 #include <asm/cacheflush.h>
16104 #include <asm/debugreg.h>
16105
16106 -static void set_idt(void *newidt, __u16 limit)
16107 +static void set_idt(struct desc_struct *newidt, __u16 limit)
16108 {
16109 struct desc_ptr curidt;
16110
16111 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
16112 }
16113
16114
16115 -static void set_gdt(void *newgdt, __u16 limit)
16116 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
16117 {
16118 struct desc_ptr curgdt;
16119
16120 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
16121 }
16122
16123 control_page = page_address(image->control_code_page);
16124 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
16125 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
16126
16127 relocate_kernel_ptr = control_page;
16128 page_list[PA_CONTROL_PAGE] = __pa(control_page);
16129 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
16130 index 3ca42d0..7cff8cc 100644
16131 --- a/arch/x86/kernel/microcode_intel.c
16132 +++ b/arch/x86/kernel/microcode_intel.c
16133 @@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
16134
16135 static int get_ucode_user(void *to, const void *from, size_t n)
16136 {
16137 - return copy_from_user(to, from, n);
16138 + return copy_from_user(to, (const void __force_user *)from, n);
16139 }
16140
16141 static enum ucode_state
16142 request_microcode_user(int cpu, const void __user *buf, size_t size)
16143 {
16144 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
16145 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
16146 }
16147
16148 static void microcode_fini_cpu(int cpu)
16149 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
16150 index 925179f..267ac7a 100644
16151 --- a/arch/x86/kernel/module.c
16152 +++ b/arch/x86/kernel/module.c
16153 @@ -36,15 +36,60 @@
16154 #define DEBUGP(fmt...)
16155 #endif
16156
16157 -void *module_alloc(unsigned long size)
16158 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
16159 {
16160 - if (PAGE_ALIGN(size) > MODULES_LEN)
16161 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
16162 return NULL;
16163 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
16164 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
16165 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
16166 -1, __builtin_return_address(0));
16167 }
16168
16169 +void *module_alloc(unsigned long size)
16170 +{
16171 +
16172 +#ifdef CONFIG_PAX_KERNEXEC
16173 + return __module_alloc(size, PAGE_KERNEL);
16174 +#else
16175 + return __module_alloc(size, PAGE_KERNEL_EXEC);
16176 +#endif
16177 +
16178 +}
16179 +
16180 +#ifdef CONFIG_PAX_KERNEXEC
16181 +#ifdef CONFIG_X86_32
16182 +void *module_alloc_exec(unsigned long size)
16183 +{
16184 + struct vm_struct *area;
16185 +
16186 + if (size == 0)
16187 + return NULL;
16188 +
16189 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
16190 + return area ? area->addr : NULL;
16191 +}
16192 +EXPORT_SYMBOL(module_alloc_exec);
16193 +
16194 +void module_free_exec(struct module *mod, void *module_region)
16195 +{
16196 + vunmap(module_region);
16197 +}
16198 +EXPORT_SYMBOL(module_free_exec);
16199 +#else
16200 +void module_free_exec(struct module *mod, void *module_region)
16201 +{
16202 + module_free(mod, module_region);
16203 +}
16204 +EXPORT_SYMBOL(module_free_exec);
16205 +
16206 +void *module_alloc_exec(unsigned long size)
16207 +{
16208 + return __module_alloc(size, PAGE_KERNEL_RX);
16209 +}
16210 +EXPORT_SYMBOL(module_alloc_exec);
16211 +#endif
16212 +#endif
16213 +
16214 #ifdef CONFIG_X86_32
16215 int apply_relocate(Elf32_Shdr *sechdrs,
16216 const char *strtab,
16217 @@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
16218 unsigned int i;
16219 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
16220 Elf32_Sym *sym;
16221 - uint32_t *location;
16222 + uint32_t *plocation, location;
16223
16224 DEBUGP("Applying relocate section %u to %u\n", relsec,
16225 sechdrs[relsec].sh_info);
16226 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
16227 /* This is where to make the change */
16228 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
16229 - + rel[i].r_offset;
16230 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
16231 + location = (uint32_t)plocation;
16232 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
16233 + plocation = ktla_ktva((void *)plocation);
16234 /* This is the symbol it is referring to. Note that all
16235 undefined symbols have been resolved. */
16236 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
16237 @@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
16238 switch (ELF32_R_TYPE(rel[i].r_info)) {
16239 case R_386_32:
16240 /* We add the value into the location given */
16241 - *location += sym->st_value;
16242 + pax_open_kernel();
16243 + *plocation += sym->st_value;
16244 + pax_close_kernel();
16245 break;
16246 case R_386_PC32:
16247 /* Add the value, subtract its postition */
16248 - *location += sym->st_value - (uint32_t)location;
16249 + pax_open_kernel();
16250 + *plocation += sym->st_value - location;
16251 + pax_close_kernel();
16252 break;
16253 default:
16254 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
16255 @@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
16256 case R_X86_64_NONE:
16257 break;
16258 case R_X86_64_64:
16259 + pax_open_kernel();
16260 *(u64 *)loc = val;
16261 + pax_close_kernel();
16262 break;
16263 case R_X86_64_32:
16264 + pax_open_kernel();
16265 *(u32 *)loc = val;
16266 + pax_close_kernel();
16267 if (val != *(u32 *)loc)
16268 goto overflow;
16269 break;
16270 case R_X86_64_32S:
16271 + pax_open_kernel();
16272 *(s32 *)loc = val;
16273 + pax_close_kernel();
16274 if ((s64)val != *(s32 *)loc)
16275 goto overflow;
16276 break;
16277 case R_X86_64_PC32:
16278 val -= (u64)loc;
16279 + pax_open_kernel();
16280 *(u32 *)loc = val;
16281 + pax_close_kernel();
16282 +
16283 #if 0
16284 if ((s64)val != *(s32 *)loc)
16285 goto overflow;
16286 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
16287 index e88f37b..1353db6 100644
16288 --- a/arch/x86/kernel/nmi.c
16289 +++ b/arch/x86/kernel/nmi.c
16290 @@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
16291 dotraplinkage notrace __kprobes void
16292 do_nmi(struct pt_regs *regs, long error_code)
16293 {
16294 +
16295 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16296 + if (!user_mode(regs)) {
16297 + unsigned long cs = regs->cs & 0xFFFF;
16298 + unsigned long ip = ktva_ktla(regs->ip);
16299 +
16300 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16301 + regs->ip = ip;
16302 + }
16303 +#endif
16304 +
16305 nmi_enter();
16306
16307 inc_irq_stat(__nmi_count);
16308 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
16309 index 676b8c7..870ba04 100644
16310 --- a/arch/x86/kernel/paravirt-spinlocks.c
16311 +++ b/arch/x86/kernel/paravirt-spinlocks.c
16312 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
16313 arch_spin_lock(lock);
16314 }
16315
16316 -struct pv_lock_ops pv_lock_ops = {
16317 +struct pv_lock_ops pv_lock_ops __read_only = {
16318 #ifdef CONFIG_SMP
16319 .spin_is_locked = __ticket_spin_is_locked,
16320 .spin_is_contended = __ticket_spin_is_contended,
16321 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
16322 index d90272e..6bb013b 100644
16323 --- a/arch/x86/kernel/paravirt.c
16324 +++ b/arch/x86/kernel/paravirt.c
16325 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
16326 {
16327 return x;
16328 }
16329 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16330 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
16331 +#endif
16332
16333 void __init default_banner(void)
16334 {
16335 @@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
16336 if (opfunc == NULL)
16337 /* If there's no function, patch it with a ud2a (BUG) */
16338 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16339 - else if (opfunc == _paravirt_nop)
16340 + else if (opfunc == (void *)_paravirt_nop)
16341 /* If the operation is a nop, then nop the callsite */
16342 ret = paravirt_patch_nop();
16343
16344 /* identity functions just return their single argument */
16345 - else if (opfunc == _paravirt_ident_32)
16346 + else if (opfunc == (void *)_paravirt_ident_32)
16347 ret = paravirt_patch_ident_32(insnbuf, len);
16348 - else if (opfunc == _paravirt_ident_64)
16349 + else if (opfunc == (void *)_paravirt_ident_64)
16350 ret = paravirt_patch_ident_64(insnbuf, len);
16351 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16352 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16353 + ret = paravirt_patch_ident_64(insnbuf, len);
16354 +#endif
16355
16356 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16357 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16358 @@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16359 if (insn_len > len || start == NULL)
16360 insn_len = len;
16361 else
16362 - memcpy(insnbuf, start, insn_len);
16363 + memcpy(insnbuf, ktla_ktva(start), insn_len);
16364
16365 return insn_len;
16366 }
16367 @@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
16368 preempt_enable();
16369 }
16370
16371 -struct pv_info pv_info = {
16372 +struct pv_info pv_info __read_only = {
16373 .name = "bare hardware",
16374 .paravirt_enabled = 0,
16375 .kernel_rpl = 0,
16376 @@ -313,16 +320,16 @@ struct pv_info pv_info = {
16377 #endif
16378 };
16379
16380 -struct pv_init_ops pv_init_ops = {
16381 +struct pv_init_ops pv_init_ops __read_only = {
16382 .patch = native_patch,
16383 };
16384
16385 -struct pv_time_ops pv_time_ops = {
16386 +struct pv_time_ops pv_time_ops __read_only = {
16387 .sched_clock = native_sched_clock,
16388 .steal_clock = native_steal_clock,
16389 };
16390
16391 -struct pv_irq_ops pv_irq_ops = {
16392 +struct pv_irq_ops pv_irq_ops __read_only = {
16393 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16394 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16395 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16396 @@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
16397 #endif
16398 };
16399
16400 -struct pv_cpu_ops pv_cpu_ops = {
16401 +struct pv_cpu_ops pv_cpu_ops __read_only = {
16402 .cpuid = native_cpuid,
16403 .get_debugreg = native_get_debugreg,
16404 .set_debugreg = native_set_debugreg,
16405 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16406 .end_context_switch = paravirt_nop,
16407 };
16408
16409 -struct pv_apic_ops pv_apic_ops = {
16410 +struct pv_apic_ops pv_apic_ops __read_only = {
16411 #ifdef CONFIG_X86_LOCAL_APIC
16412 .startup_ipi_hook = paravirt_nop,
16413 #endif
16414 };
16415
16416 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16417 +#ifdef CONFIG_X86_32
16418 +#ifdef CONFIG_X86_PAE
16419 +/* 64-bit pagetable entries */
16420 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16421 +#else
16422 /* 32-bit pagetable entries */
16423 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16424 +#endif
16425 #else
16426 /* 64-bit pagetable entries */
16427 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16428 #endif
16429
16430 -struct pv_mmu_ops pv_mmu_ops = {
16431 +struct pv_mmu_ops pv_mmu_ops __read_only = {
16432
16433 .read_cr2 = native_read_cr2,
16434 .write_cr2 = native_write_cr2,
16435 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16436 .make_pud = PTE_IDENT,
16437
16438 .set_pgd = native_set_pgd,
16439 + .set_pgd_batched = native_set_pgd_batched,
16440 #endif
16441 #endif /* PAGETABLE_LEVELS >= 3 */
16442
16443 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16444 },
16445
16446 .set_fixmap = native_set_fixmap,
16447 +
16448 +#ifdef CONFIG_PAX_KERNEXEC
16449 + .pax_open_kernel = native_pax_open_kernel,
16450 + .pax_close_kernel = native_pax_close_kernel,
16451 +#endif
16452 +
16453 };
16454
16455 EXPORT_SYMBOL_GPL(pv_time_ops);
16456 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16457 index 35ccf75..7a15747 100644
16458 --- a/arch/x86/kernel/pci-iommu_table.c
16459 +++ b/arch/x86/kernel/pci-iommu_table.c
16460 @@ -2,7 +2,7 @@
16461 #include <asm/iommu_table.h>
16462 #include <linux/string.h>
16463 #include <linux/kallsyms.h>
16464 -
16465 +#include <linux/sched.h>
16466
16467 #define DEBUG 1
16468
16469 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16470 index ee5d4fb..426649b 100644
16471 --- a/arch/x86/kernel/process.c
16472 +++ b/arch/x86/kernel/process.c
16473 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16474
16475 void free_thread_info(struct thread_info *ti)
16476 {
16477 - free_thread_xstate(ti->task);
16478 free_pages((unsigned long)ti, THREAD_ORDER);
16479 }
16480
16481 +static struct kmem_cache *task_struct_cachep;
16482 +
16483 void arch_task_cache_init(void)
16484 {
16485 - task_xstate_cachep =
16486 - kmem_cache_create("task_xstate", xstate_size,
16487 + /* create a slab on which task_structs can be allocated */
16488 + task_struct_cachep =
16489 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16490 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16491 +
16492 + task_xstate_cachep =
16493 + kmem_cache_create("task_xstate", xstate_size,
16494 __alignof__(union thread_xstate),
16495 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16496 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16497 +}
16498 +
16499 +struct task_struct *alloc_task_struct_node(int node)
16500 +{
16501 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16502 +}
16503 +
16504 +void free_task_struct(struct task_struct *task)
16505 +{
16506 + free_thread_xstate(task);
16507 + kmem_cache_free(task_struct_cachep, task);
16508 }
16509
16510 /*
16511 @@ -70,7 +87,7 @@ void exit_thread(void)
16512 unsigned long *bp = t->io_bitmap_ptr;
16513
16514 if (bp) {
16515 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16516 + struct tss_struct *tss = init_tss + get_cpu();
16517
16518 t->io_bitmap_ptr = NULL;
16519 clear_thread_flag(TIF_IO_BITMAP);
16520 @@ -106,7 +123,7 @@ void show_regs_common(void)
16521
16522 printk(KERN_CONT "\n");
16523 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16524 - current->pid, current->comm, print_tainted(),
16525 + task_pid_nr(current), current->comm, print_tainted(),
16526 init_utsname()->release,
16527 (int)strcspn(init_utsname()->version, " "),
16528 init_utsname()->version);
16529 @@ -120,6 +137,9 @@ void flush_thread(void)
16530 {
16531 struct task_struct *tsk = current;
16532
16533 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16534 + loadsegment(gs, 0);
16535 +#endif
16536 flush_ptrace_hw_breakpoint(tsk);
16537 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16538 /*
16539 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16540 regs.di = (unsigned long) arg;
16541
16542 #ifdef CONFIG_X86_32
16543 - regs.ds = __USER_DS;
16544 - regs.es = __USER_DS;
16545 + regs.ds = __KERNEL_DS;
16546 + regs.es = __KERNEL_DS;
16547 regs.fs = __KERNEL_PERCPU;
16548 - regs.gs = __KERNEL_STACK_CANARY;
16549 + savesegment(gs, regs.gs);
16550 #else
16551 regs.ss = __KERNEL_DS;
16552 #endif
16553 @@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
16554
16555 return ret;
16556 }
16557 -void stop_this_cpu(void *dummy)
16558 +__noreturn void stop_this_cpu(void *dummy)
16559 {
16560 local_irq_disable();
16561 /*
16562 @@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
16563 }
16564 early_param("idle", idle_setup);
16565
16566 -unsigned long arch_align_stack(unsigned long sp)
16567 +#ifdef CONFIG_PAX_RANDKSTACK
16568 +void pax_randomize_kstack(struct pt_regs *regs)
16569 {
16570 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16571 - sp -= get_random_int() % 8192;
16572 - return sp & ~0xf;
16573 -}
16574 + struct thread_struct *thread = &current->thread;
16575 + unsigned long time;
16576
16577 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16578 -{
16579 - unsigned long range_end = mm->brk + 0x02000000;
16580 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16581 -}
16582 + if (!randomize_va_space)
16583 + return;
16584 +
16585 + if (v8086_mode(regs))
16586 + return;
16587
16588 + rdtscl(time);
16589 +
16590 + /* P4 seems to return a 0 LSB, ignore it */
16591 +#ifdef CONFIG_MPENTIUM4
16592 + time &= 0x3EUL;
16593 + time <<= 2;
16594 +#elif defined(CONFIG_X86_64)
16595 + time &= 0xFUL;
16596 + time <<= 4;
16597 +#else
16598 + time &= 0x1FUL;
16599 + time <<= 3;
16600 +#endif
16601 +
16602 + thread->sp0 ^= time;
16603 + load_sp0(init_tss + smp_processor_id(), thread);
16604 +
16605 +#ifdef CONFIG_X86_64
16606 + percpu_write(kernel_stack, thread->sp0);
16607 +#endif
16608 +}
16609 +#endif
16610 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16611 index 8598296..bfadef0 100644
16612 --- a/arch/x86/kernel/process_32.c
16613 +++ b/arch/x86/kernel/process_32.c
16614 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16615 unsigned long thread_saved_pc(struct task_struct *tsk)
16616 {
16617 return ((unsigned long *)tsk->thread.sp)[3];
16618 +//XXX return tsk->thread.eip;
16619 }
16620
16621 #ifndef CONFIG_SMP
16622 @@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
16623 unsigned long sp;
16624 unsigned short ss, gs;
16625
16626 - if (user_mode_vm(regs)) {
16627 + if (user_mode(regs)) {
16628 sp = regs->sp;
16629 ss = regs->ss & 0xffff;
16630 - gs = get_user_gs(regs);
16631 } else {
16632 sp = kernel_stack_pointer(regs);
16633 savesegment(ss, ss);
16634 - savesegment(gs, gs);
16635 }
16636 + gs = get_user_gs(regs);
16637
16638 show_regs_common();
16639
16640 @@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16641 struct task_struct *tsk;
16642 int err;
16643
16644 - childregs = task_pt_regs(p);
16645 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16646 *childregs = *regs;
16647 childregs->ax = 0;
16648 childregs->sp = sp;
16649
16650 p->thread.sp = (unsigned long) childregs;
16651 p->thread.sp0 = (unsigned long) (childregs+1);
16652 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16653
16654 p->thread.ip = (unsigned long) ret_from_fork;
16655
16656 @@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16657 struct thread_struct *prev = &prev_p->thread,
16658 *next = &next_p->thread;
16659 int cpu = smp_processor_id();
16660 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16661 + struct tss_struct *tss = init_tss + cpu;
16662 fpu_switch_t fpu;
16663
16664 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16665 @@ -320,6 +321,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16666 */
16667 lazy_save_gs(prev->gs);
16668
16669 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16670 + __set_fs(task_thread_info(next_p)->addr_limit);
16671 +#endif
16672 +
16673 /*
16674 * Load the per-thread Thread-Local Storage descriptor.
16675 */
16676 @@ -350,6 +355,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16677 */
16678 arch_end_context_switch(next_p);
16679
16680 + percpu_write(current_task, next_p);
16681 + percpu_write(current_tinfo, &next_p->tinfo);
16682 +
16683 /*
16684 * Restore %gs if needed (which is common)
16685 */
16686 @@ -358,8 +366,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16687
16688 switch_fpu_finish(next_p, fpu);
16689
16690 - percpu_write(current_task, next_p);
16691 -
16692 return prev_p;
16693 }
16694
16695 @@ -389,4 +395,3 @@ unsigned long get_wchan(struct task_struct *p)
16696 } while (count++ < 16);
16697 return 0;
16698 }
16699 -
16700 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16701 index 6a364a6..b147d11 100644
16702 --- a/arch/x86/kernel/process_64.c
16703 +++ b/arch/x86/kernel/process_64.c
16704 @@ -89,7 +89,7 @@ static void __exit_idle(void)
16705 void exit_idle(void)
16706 {
16707 /* idle loop has pid 0 */
16708 - if (current->pid)
16709 + if (task_pid_nr(current))
16710 return;
16711 __exit_idle();
16712 }
16713 @@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16714 struct pt_regs *childregs;
16715 struct task_struct *me = current;
16716
16717 - childregs = ((struct pt_regs *)
16718 - (THREAD_SIZE + task_stack_page(p))) - 1;
16719 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16720 *childregs = *regs;
16721
16722 childregs->ax = 0;
16723 @@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16724 p->thread.sp = (unsigned long) childregs;
16725 p->thread.sp0 = (unsigned long) (childregs+1);
16726 p->thread.usersp = me->thread.usersp;
16727 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16728
16729 set_tsk_thread_flag(p, TIF_FORK);
16730
16731 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16732 struct thread_struct *prev = &prev_p->thread;
16733 struct thread_struct *next = &next_p->thread;
16734 int cpu = smp_processor_id();
16735 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16736 + struct tss_struct *tss = init_tss + cpu;
16737 unsigned fsindex, gsindex;
16738 fpu_switch_t fpu;
16739
16740 @@ -461,10 +461,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16741 prev->usersp = percpu_read(old_rsp);
16742 percpu_write(old_rsp, next->usersp);
16743 percpu_write(current_task, next_p);
16744 + percpu_write(current_tinfo, &next_p->tinfo);
16745
16746 - percpu_write(kernel_stack,
16747 - (unsigned long)task_stack_page(next_p) +
16748 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16749 + percpu_write(kernel_stack, next->sp0);
16750
16751 /*
16752 * Now maybe reload the debug registers and handle I/O bitmaps
16753 @@ -519,12 +518,11 @@ unsigned long get_wchan(struct task_struct *p)
16754 if (!p || p == current || p->state == TASK_RUNNING)
16755 return 0;
16756 stack = (unsigned long)task_stack_page(p);
16757 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16758 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16759 return 0;
16760 fp = *(u64 *)(p->thread.sp);
16761 do {
16762 - if (fp < (unsigned long)stack ||
16763 - fp >= (unsigned long)stack+THREAD_SIZE)
16764 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16765 return 0;
16766 ip = *(u64 *)(fp+8);
16767 if (!in_sched_functions(ip))
16768 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16769 index 8252879..d3219e0 100644
16770 --- a/arch/x86/kernel/ptrace.c
16771 +++ b/arch/x86/kernel/ptrace.c
16772 @@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16773 unsigned long addr, unsigned long data)
16774 {
16775 int ret;
16776 - unsigned long __user *datap = (unsigned long __user *)data;
16777 + unsigned long __user *datap = (__force unsigned long __user *)data;
16778
16779 switch (request) {
16780 /* read the word at location addr in the USER area. */
16781 @@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16782 if ((int) addr < 0)
16783 return -EIO;
16784 ret = do_get_thread_area(child, addr,
16785 - (struct user_desc __user *)data);
16786 + (__force struct user_desc __user *) data);
16787 break;
16788
16789 case PTRACE_SET_THREAD_AREA:
16790 if ((int) addr < 0)
16791 return -EIO;
16792 ret = do_set_thread_area(child, addr,
16793 - (struct user_desc __user *)data, 0);
16794 + (__force struct user_desc __user *) data, 0);
16795 break;
16796 #endif
16797
16798 @@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16799 memset(info, 0, sizeof(*info));
16800 info->si_signo = SIGTRAP;
16801 info->si_code = si_code;
16802 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16803 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16804 }
16805
16806 void user_single_step_siginfo(struct task_struct *tsk,
16807 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16808 index 42eb330..139955c 100644
16809 --- a/arch/x86/kernel/pvclock.c
16810 +++ b/arch/x86/kernel/pvclock.c
16811 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16812 return pv_tsc_khz;
16813 }
16814
16815 -static atomic64_t last_value = ATOMIC64_INIT(0);
16816 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16817
16818 void pvclock_resume(void)
16819 {
16820 - atomic64_set(&last_value, 0);
16821 + atomic64_set_unchecked(&last_value, 0);
16822 }
16823
16824 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16825 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16826 * updating at the same time, and one of them could be slightly behind,
16827 * making the assumption that last_value always go forward fail to hold.
16828 */
16829 - last = atomic64_read(&last_value);
16830 + last = atomic64_read_unchecked(&last_value);
16831 do {
16832 if (ret < last)
16833 return last;
16834 - last = atomic64_cmpxchg(&last_value, last, ret);
16835 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16836 } while (unlikely(last != ret));
16837
16838 return ret;
16839 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16840 index 37a458b..e63d183 100644
16841 --- a/arch/x86/kernel/reboot.c
16842 +++ b/arch/x86/kernel/reboot.c
16843 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16844 EXPORT_SYMBOL(pm_power_off);
16845
16846 static const struct desc_ptr no_idt = {};
16847 -static int reboot_mode;
16848 +static unsigned short reboot_mode;
16849 enum reboot_type reboot_type = BOOT_ACPI;
16850 int reboot_force;
16851
16852 @@ -324,13 +324,17 @@ core_initcall(reboot_init);
16853 extern const unsigned char machine_real_restart_asm[];
16854 extern const u64 machine_real_restart_gdt[3];
16855
16856 -void machine_real_restart(unsigned int type)
16857 +__noreturn void machine_real_restart(unsigned int type)
16858 {
16859 void *restart_va;
16860 unsigned long restart_pa;
16861 - void (*restart_lowmem)(unsigned int);
16862 + void (* __noreturn restart_lowmem)(unsigned int);
16863 u64 *lowmem_gdt;
16864
16865 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16866 + struct desc_struct *gdt;
16867 +#endif
16868 +
16869 local_irq_disable();
16870
16871 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16872 @@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16873 boot)". This seems like a fairly standard thing that gets set by
16874 REBOOT.COM programs, and the previous reset routine did this
16875 too. */
16876 - *((unsigned short *)0x472) = reboot_mode;
16877 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16878
16879 /* Patch the GDT in the low memory trampoline */
16880 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16881
16882 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16883 restart_pa = virt_to_phys(restart_va);
16884 - restart_lowmem = (void (*)(unsigned int))restart_pa;
16885 + restart_lowmem = (void *)restart_pa;
16886
16887 /* GDT[0]: GDT self-pointer */
16888 lowmem_gdt[0] =
16889 @@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16890 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16891
16892 /* Jump to the identity-mapped low memory code */
16893 +
16894 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16895 + gdt = get_cpu_gdt_table(smp_processor_id());
16896 + pax_open_kernel();
16897 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16898 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16899 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16900 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16901 +#endif
16902 +#ifdef CONFIG_PAX_KERNEXEC
16903 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16904 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16905 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16906 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16907 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16908 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16909 +#endif
16910 + pax_close_kernel();
16911 +#endif
16912 +
16913 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16914 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16915 + unreachable();
16916 +#else
16917 restart_lowmem(type);
16918 +#endif
16919 +
16920 }
16921 #ifdef CONFIG_APM_MODULE
16922 EXPORT_SYMBOL(machine_real_restart);
16923 @@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16924 * try to force a triple fault and then cycle between hitting the keyboard
16925 * controller and doing that
16926 */
16927 -static void native_machine_emergency_restart(void)
16928 +__noreturn static void native_machine_emergency_restart(void)
16929 {
16930 int i;
16931 int attempt = 0;
16932 @@ -664,13 +694,13 @@ void native_machine_shutdown(void)
16933 #endif
16934 }
16935
16936 -static void __machine_emergency_restart(int emergency)
16937 +static __noreturn void __machine_emergency_restart(int emergency)
16938 {
16939 reboot_emergency = emergency;
16940 machine_ops.emergency_restart();
16941 }
16942
16943 -static void native_machine_restart(char *__unused)
16944 +static __noreturn void native_machine_restart(char *__unused)
16945 {
16946 printk("machine restart\n");
16947
16948 @@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
16949 __machine_emergency_restart(0);
16950 }
16951
16952 -static void native_machine_halt(void)
16953 +static __noreturn void native_machine_halt(void)
16954 {
16955 /* stop other cpus and apics */
16956 machine_shutdown();
16957 @@ -690,7 +720,7 @@ static void native_machine_halt(void)
16958 stop_this_cpu(NULL);
16959 }
16960
16961 -static void native_machine_power_off(void)
16962 +__noreturn static void native_machine_power_off(void)
16963 {
16964 if (pm_power_off) {
16965 if (!reboot_force)
16966 @@ -699,6 +729,7 @@ static void native_machine_power_off(void)
16967 }
16968 /* a fallback in case there is no PM info available */
16969 tboot_shutdown(TB_SHUTDOWN_HALT);
16970 + unreachable();
16971 }
16972
16973 struct machine_ops machine_ops = {
16974 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16975 index 7a6f3b3..bed145d7 100644
16976 --- a/arch/x86/kernel/relocate_kernel_64.S
16977 +++ b/arch/x86/kernel/relocate_kernel_64.S
16978 @@ -11,6 +11,7 @@
16979 #include <asm/kexec.h>
16980 #include <asm/processor-flags.h>
16981 #include <asm/pgtable_types.h>
16982 +#include <asm/alternative-asm.h>
16983
16984 /*
16985 * Must be relocatable PIC code callable as a C function
16986 @@ -160,13 +161,14 @@ identity_mapped:
16987 xorq %rbp, %rbp
16988 xorq %r8, %r8
16989 xorq %r9, %r9
16990 - xorq %r10, %r9
16991 + xorq %r10, %r10
16992 xorq %r11, %r11
16993 xorq %r12, %r12
16994 xorq %r13, %r13
16995 xorq %r14, %r14
16996 xorq %r15, %r15
16997
16998 + pax_force_retaddr 0, 1
16999 ret
17000
17001 1:
17002 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
17003 index cf0ef98..e3f780b 100644
17004 --- a/arch/x86/kernel/setup.c
17005 +++ b/arch/x86/kernel/setup.c
17006 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
17007
17008 switch (data->type) {
17009 case SETUP_E820_EXT:
17010 - parse_e820_ext(data);
17011 + parse_e820_ext((struct setup_data __force_kernel *)data);
17012 break;
17013 case SETUP_DTB:
17014 add_dtb(pa_data);
17015 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
17016 * area (640->1Mb) as ram even though it is not.
17017 * take them out.
17018 */
17019 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
17020 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
17021 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
17022 }
17023
17024 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
17025
17026 if (!boot_params.hdr.root_flags)
17027 root_mountflags &= ~MS_RDONLY;
17028 - init_mm.start_code = (unsigned long) _text;
17029 - init_mm.end_code = (unsigned long) _etext;
17030 + init_mm.start_code = ktla_ktva((unsigned long) _text);
17031 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
17032 init_mm.end_data = (unsigned long) _edata;
17033 init_mm.brk = _brk_end;
17034
17035 - code_resource.start = virt_to_phys(_text);
17036 - code_resource.end = virt_to_phys(_etext)-1;
17037 - data_resource.start = virt_to_phys(_etext);
17038 + code_resource.start = virt_to_phys(ktla_ktva(_text));
17039 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
17040 + data_resource.start = virt_to_phys(_sdata);
17041 data_resource.end = virt_to_phys(_edata)-1;
17042 bss_resource.start = virt_to_phys(&__bss_start);
17043 bss_resource.end = virt_to_phys(&__bss_stop)-1;
17044 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
17045 index 71f4727..16dc9f7 100644
17046 --- a/arch/x86/kernel/setup_percpu.c
17047 +++ b/arch/x86/kernel/setup_percpu.c
17048 @@ -21,19 +21,17 @@
17049 #include <asm/cpu.h>
17050 #include <asm/stackprotector.h>
17051
17052 -DEFINE_PER_CPU(int, cpu_number);
17053 +#ifdef CONFIG_SMP
17054 +DEFINE_PER_CPU(unsigned int, cpu_number);
17055 EXPORT_PER_CPU_SYMBOL(cpu_number);
17056 +#endif
17057
17058 -#ifdef CONFIG_X86_64
17059 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
17060 -#else
17061 -#define BOOT_PERCPU_OFFSET 0
17062 -#endif
17063
17064 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
17065 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
17066
17067 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
17068 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
17069 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
17070 };
17071 EXPORT_SYMBOL(__per_cpu_offset);
17072 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
17073 {
17074 #ifdef CONFIG_X86_32
17075 struct desc_struct gdt;
17076 + unsigned long base = per_cpu_offset(cpu);
17077
17078 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
17079 - 0x2 | DESCTYPE_S, 0x8);
17080 - gdt.s = 1;
17081 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
17082 + 0x83 | DESCTYPE_S, 0xC);
17083 write_gdt_entry(get_cpu_gdt_table(cpu),
17084 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
17085 #endif
17086 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
17087 /* alrighty, percpu areas up and running */
17088 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
17089 for_each_possible_cpu(cpu) {
17090 +#ifdef CONFIG_CC_STACKPROTECTOR
17091 +#ifdef CONFIG_X86_32
17092 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
17093 +#endif
17094 +#endif
17095 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
17096 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
17097 per_cpu(cpu_number, cpu) = cpu;
17098 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
17099 */
17100 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
17101 #endif
17102 +#ifdef CONFIG_CC_STACKPROTECTOR
17103 +#ifdef CONFIG_X86_32
17104 + if (!cpu)
17105 + per_cpu(stack_canary.canary, cpu) = canary;
17106 +#endif
17107 +#endif
17108 /*
17109 * Up to this point, the boot CPU has been using .init.data
17110 * area. Reload any changed state for the boot CPU.
17111 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
17112 index 54ddaeb2..22c3bdc 100644
17113 --- a/arch/x86/kernel/signal.c
17114 +++ b/arch/x86/kernel/signal.c
17115 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
17116 * Align the stack pointer according to the i386 ABI,
17117 * i.e. so that on function entry ((sp + 4) & 15) == 0.
17118 */
17119 - sp = ((sp + 4) & -16ul) - 4;
17120 + sp = ((sp - 12) & -16ul) - 4;
17121 #else /* !CONFIG_X86_32 */
17122 sp = round_down(sp, 16) - 8;
17123 #endif
17124 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
17125 * Return an always-bogus address instead so we will die with SIGSEGV.
17126 */
17127 if (onsigstack && !likely(on_sig_stack(sp)))
17128 - return (void __user *)-1L;
17129 + return (__force void __user *)-1L;
17130
17131 /* save i387 state */
17132 if (used_math() && save_i387_xstate(*fpstate) < 0)
17133 - return (void __user *)-1L;
17134 + return (__force void __user *)-1L;
17135
17136 return (void __user *)sp;
17137 }
17138 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
17139 }
17140
17141 if (current->mm->context.vdso)
17142 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
17143 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
17144 else
17145 - restorer = &frame->retcode;
17146 + restorer = (void __user *)&frame->retcode;
17147 if (ka->sa.sa_flags & SA_RESTORER)
17148 restorer = ka->sa.sa_restorer;
17149
17150 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
17151 * reasons and because gdb uses it as a signature to notice
17152 * signal handler stack frames.
17153 */
17154 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
17155 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
17156
17157 if (err)
17158 return -EFAULT;
17159 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
17160 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
17161
17162 /* Set up to return from userspace. */
17163 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
17164 + if (current->mm->context.vdso)
17165 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
17166 + else
17167 + restorer = (void __user *)&frame->retcode;
17168 if (ka->sa.sa_flags & SA_RESTORER)
17169 restorer = ka->sa.sa_restorer;
17170 put_user_ex(restorer, &frame->pretcode);
17171 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
17172 * reasons and because gdb uses it as a signature to notice
17173 * signal handler stack frames.
17174 */
17175 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
17176 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
17177 } put_user_catch(err);
17178
17179 if (err)
17180 @@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
17181 * X86_32: vm86 regs switched out by assembly code before reaching
17182 * here, so testing against kernel CS suffices.
17183 */
17184 - if (!user_mode(regs))
17185 + if (!user_mode_novm(regs))
17186 return;
17187
17188 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
17189 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
17190 index 9f548cb..caf76f7 100644
17191 --- a/arch/x86/kernel/smpboot.c
17192 +++ b/arch/x86/kernel/smpboot.c
17193 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
17194 set_idle_for_cpu(cpu, c_idle.idle);
17195 do_rest:
17196 per_cpu(current_task, cpu) = c_idle.idle;
17197 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
17198 #ifdef CONFIG_X86_32
17199 /* Stack for startup_32 can be just as for start_secondary onwards */
17200 irq_ctx_init(cpu);
17201 #else
17202 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
17203 initial_gs = per_cpu_offset(cpu);
17204 - per_cpu(kernel_stack, cpu) =
17205 - (unsigned long)task_stack_page(c_idle.idle) -
17206 - KERNEL_STACK_OFFSET + THREAD_SIZE;
17207 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
17208 #endif
17209 +
17210 + pax_open_kernel();
17211 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
17212 + pax_close_kernel();
17213 +
17214 initial_code = (unsigned long)start_secondary;
17215 stack_start = c_idle.idle->thread.sp;
17216
17217 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
17218
17219 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
17220
17221 +#ifdef CONFIG_PAX_PER_CPU_PGD
17222 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
17223 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
17224 + KERNEL_PGD_PTRS);
17225 +#endif
17226 +
17227 err = do_boot_cpu(apicid, cpu);
17228 if (err) {
17229 pr_debug("do_boot_cpu failed %d\n", err);
17230 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
17231 index c346d11..d43b163 100644
17232 --- a/arch/x86/kernel/step.c
17233 +++ b/arch/x86/kernel/step.c
17234 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17235 struct desc_struct *desc;
17236 unsigned long base;
17237
17238 - seg &= ~7UL;
17239 + seg >>= 3;
17240
17241 mutex_lock(&child->mm->context.lock);
17242 - if (unlikely((seg >> 3) >= child->mm->context.size))
17243 + if (unlikely(seg >= child->mm->context.size))
17244 addr = -1L; /* bogus selector, access would fault */
17245 else {
17246 desc = child->mm->context.ldt + seg;
17247 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17248 addr += base;
17249 }
17250 mutex_unlock(&child->mm->context.lock);
17251 - }
17252 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
17253 + addr = ktla_ktva(addr);
17254
17255 return addr;
17256 }
17257 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
17258 unsigned char opcode[15];
17259 unsigned long addr = convert_ip_to_linear(child, regs);
17260
17261 + if (addr == -EINVAL)
17262 + return 0;
17263 +
17264 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
17265 for (i = 0; i < copied; i++) {
17266 switch (opcode[i]) {
17267 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
17268 index 0b0cb5f..db6b9ed 100644
17269 --- a/arch/x86/kernel/sys_i386_32.c
17270 +++ b/arch/x86/kernel/sys_i386_32.c
17271 @@ -24,17 +24,224 @@
17272
17273 #include <asm/syscalls.h>
17274
17275 -/*
17276 - * Do a system call from kernel instead of calling sys_execve so we
17277 - * end up with proper pt_regs.
17278 - */
17279 -int kernel_execve(const char *filename,
17280 - const char *const argv[],
17281 - const char *const envp[])
17282 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17283 {
17284 - long __res;
17285 - asm volatile ("int $0x80"
17286 - : "=a" (__res)
17287 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
17288 - return __res;
17289 + unsigned long pax_task_size = TASK_SIZE;
17290 +
17291 +#ifdef CONFIG_PAX_SEGMEXEC
17292 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17293 + pax_task_size = SEGMEXEC_TASK_SIZE;
17294 +#endif
17295 +
17296 + if (len > pax_task_size || addr > pax_task_size - len)
17297 + return -EINVAL;
17298 +
17299 + return 0;
17300 +}
17301 +
17302 +unsigned long
17303 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
17304 + unsigned long len, unsigned long pgoff, unsigned long flags)
17305 +{
17306 + struct mm_struct *mm = current->mm;
17307 + struct vm_area_struct *vma;
17308 + unsigned long start_addr, pax_task_size = TASK_SIZE;
17309 +
17310 +#ifdef CONFIG_PAX_SEGMEXEC
17311 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17312 + pax_task_size = SEGMEXEC_TASK_SIZE;
17313 +#endif
17314 +
17315 + pax_task_size -= PAGE_SIZE;
17316 +
17317 + if (len > pax_task_size)
17318 + return -ENOMEM;
17319 +
17320 + if (flags & MAP_FIXED)
17321 + return addr;
17322 +
17323 +#ifdef CONFIG_PAX_RANDMMAP
17324 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17325 +#endif
17326 +
17327 + if (addr) {
17328 + addr = PAGE_ALIGN(addr);
17329 + if (pax_task_size - len >= addr) {
17330 + vma = find_vma(mm, addr);
17331 + if (check_heap_stack_gap(vma, addr, len))
17332 + return addr;
17333 + }
17334 + }
17335 + if (len > mm->cached_hole_size) {
17336 + start_addr = addr = mm->free_area_cache;
17337 + } else {
17338 + start_addr = addr = mm->mmap_base;
17339 + mm->cached_hole_size = 0;
17340 + }
17341 +
17342 +#ifdef CONFIG_PAX_PAGEEXEC
17343 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17344 + start_addr = 0x00110000UL;
17345 +
17346 +#ifdef CONFIG_PAX_RANDMMAP
17347 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17348 + start_addr += mm->delta_mmap & 0x03FFF000UL;
17349 +#endif
17350 +
17351 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17352 + start_addr = addr = mm->mmap_base;
17353 + else
17354 + addr = start_addr;
17355 + }
17356 +#endif
17357 +
17358 +full_search:
17359 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17360 + /* At this point: (!vma || addr < vma->vm_end). */
17361 + if (pax_task_size - len < addr) {
17362 + /*
17363 + * Start a new search - just in case we missed
17364 + * some holes.
17365 + */
17366 + if (start_addr != mm->mmap_base) {
17367 + start_addr = addr = mm->mmap_base;
17368 + mm->cached_hole_size = 0;
17369 + goto full_search;
17370 + }
17371 + return -ENOMEM;
17372 + }
17373 + if (check_heap_stack_gap(vma, addr, len))
17374 + break;
17375 + if (addr + mm->cached_hole_size < vma->vm_start)
17376 + mm->cached_hole_size = vma->vm_start - addr;
17377 + addr = vma->vm_end;
17378 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
17379 + start_addr = addr = mm->mmap_base;
17380 + mm->cached_hole_size = 0;
17381 + goto full_search;
17382 + }
17383 + }
17384 +
17385 + /*
17386 + * Remember the place where we stopped the search:
17387 + */
17388 + mm->free_area_cache = addr + len;
17389 + return addr;
17390 +}
17391 +
17392 +unsigned long
17393 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17394 + const unsigned long len, const unsigned long pgoff,
17395 + const unsigned long flags)
17396 +{
17397 + struct vm_area_struct *vma;
17398 + struct mm_struct *mm = current->mm;
17399 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17400 +
17401 +#ifdef CONFIG_PAX_SEGMEXEC
17402 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17403 + pax_task_size = SEGMEXEC_TASK_SIZE;
17404 +#endif
17405 +
17406 + pax_task_size -= PAGE_SIZE;
17407 +
17408 + /* requested length too big for entire address space */
17409 + if (len > pax_task_size)
17410 + return -ENOMEM;
17411 +
17412 + if (flags & MAP_FIXED)
17413 + return addr;
17414 +
17415 +#ifdef CONFIG_PAX_PAGEEXEC
17416 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17417 + goto bottomup;
17418 +#endif
17419 +
17420 +#ifdef CONFIG_PAX_RANDMMAP
17421 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17422 +#endif
17423 +
17424 + /* requesting a specific address */
17425 + if (addr) {
17426 + addr = PAGE_ALIGN(addr);
17427 + if (pax_task_size - len >= addr) {
17428 + vma = find_vma(mm, addr);
17429 + if (check_heap_stack_gap(vma, addr, len))
17430 + return addr;
17431 + }
17432 + }
17433 +
17434 + /* check if free_area_cache is useful for us */
17435 + if (len <= mm->cached_hole_size) {
17436 + mm->cached_hole_size = 0;
17437 + mm->free_area_cache = mm->mmap_base;
17438 + }
17439 +
17440 + /* either no address requested or can't fit in requested address hole */
17441 + addr = mm->free_area_cache;
17442 +
17443 + /* make sure it can fit in the remaining address space */
17444 + if (addr > len) {
17445 + vma = find_vma(mm, addr-len);
17446 + if (check_heap_stack_gap(vma, addr - len, len))
17447 + /* remember the address as a hint for next time */
17448 + return (mm->free_area_cache = addr-len);
17449 + }
17450 +
17451 + if (mm->mmap_base < len)
17452 + goto bottomup;
17453 +
17454 + addr = mm->mmap_base-len;
17455 +
17456 + do {
17457 + /*
17458 + * Lookup failure means no vma is above this address,
17459 + * else if new region fits below vma->vm_start,
17460 + * return with success:
17461 + */
17462 + vma = find_vma(mm, addr);
17463 + if (check_heap_stack_gap(vma, addr, len))
17464 + /* remember the address as a hint for next time */
17465 + return (mm->free_area_cache = addr);
17466 +
17467 + /* remember the largest hole we saw so far */
17468 + if (addr + mm->cached_hole_size < vma->vm_start)
17469 + mm->cached_hole_size = vma->vm_start - addr;
17470 +
17471 + /* try just below the current vma->vm_start */
17472 + addr = skip_heap_stack_gap(vma, len);
17473 + } while (!IS_ERR_VALUE(addr));
17474 +
17475 +bottomup:
17476 + /*
17477 + * A failed mmap() very likely causes application failure,
17478 + * so fall back to the bottom-up function here. This scenario
17479 + * can happen with large stack limits and large mmap()
17480 + * allocations.
17481 + */
17482 +
17483 +#ifdef CONFIG_PAX_SEGMEXEC
17484 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17485 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17486 + else
17487 +#endif
17488 +
17489 + mm->mmap_base = TASK_UNMAPPED_BASE;
17490 +
17491 +#ifdef CONFIG_PAX_RANDMMAP
17492 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17493 + mm->mmap_base += mm->delta_mmap;
17494 +#endif
17495 +
17496 + mm->free_area_cache = mm->mmap_base;
17497 + mm->cached_hole_size = ~0UL;
17498 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17499 + /*
17500 + * Restore the topdown base:
17501 + */
17502 + mm->mmap_base = base;
17503 + mm->free_area_cache = base;
17504 + mm->cached_hole_size = ~0UL;
17505 +
17506 + return addr;
17507 }
17508 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17509 index 0514890..3dbebce 100644
17510 --- a/arch/x86/kernel/sys_x86_64.c
17511 +++ b/arch/x86/kernel/sys_x86_64.c
17512 @@ -95,8 +95,8 @@ out:
17513 return error;
17514 }
17515
17516 -static void find_start_end(unsigned long flags, unsigned long *begin,
17517 - unsigned long *end)
17518 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17519 + unsigned long *begin, unsigned long *end)
17520 {
17521 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17522 unsigned long new_begin;
17523 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17524 *begin = new_begin;
17525 }
17526 } else {
17527 - *begin = TASK_UNMAPPED_BASE;
17528 + *begin = mm->mmap_base;
17529 *end = TASK_SIZE;
17530 }
17531 }
17532 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17533 if (flags & MAP_FIXED)
17534 return addr;
17535
17536 - find_start_end(flags, &begin, &end);
17537 + find_start_end(mm, flags, &begin, &end);
17538
17539 if (len > end)
17540 return -ENOMEM;
17541
17542 +#ifdef CONFIG_PAX_RANDMMAP
17543 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17544 +#endif
17545 +
17546 if (addr) {
17547 addr = PAGE_ALIGN(addr);
17548 vma = find_vma(mm, addr);
17549 - if (end - len >= addr &&
17550 - (!vma || addr + len <= vma->vm_start))
17551 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17552 return addr;
17553 }
17554 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17555 @@ -172,7 +175,7 @@ full_search:
17556 }
17557 return -ENOMEM;
17558 }
17559 - if (!vma || addr + len <= vma->vm_start) {
17560 + if (check_heap_stack_gap(vma, addr, len)) {
17561 /*
17562 * Remember the place where we stopped the search:
17563 */
17564 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17565 {
17566 struct vm_area_struct *vma;
17567 struct mm_struct *mm = current->mm;
17568 - unsigned long addr = addr0;
17569 + unsigned long base = mm->mmap_base, addr = addr0;
17570
17571 /* requested length too big for entire address space */
17572 if (len > TASK_SIZE)
17573 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17574 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17575 goto bottomup;
17576
17577 +#ifdef CONFIG_PAX_RANDMMAP
17578 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17579 +#endif
17580 +
17581 /* requesting a specific address */
17582 if (addr) {
17583 addr = PAGE_ALIGN(addr);
17584 - vma = find_vma(mm, addr);
17585 - if (TASK_SIZE - len >= addr &&
17586 - (!vma || addr + len <= vma->vm_start))
17587 - return addr;
17588 + if (TASK_SIZE - len >= addr) {
17589 + vma = find_vma(mm, addr);
17590 + if (check_heap_stack_gap(vma, addr, len))
17591 + return addr;
17592 + }
17593 }
17594
17595 /* check if free_area_cache is useful for us */
17596 @@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17597 ALIGN_TOPDOWN);
17598
17599 vma = find_vma(mm, tmp_addr);
17600 - if (!vma || tmp_addr + len <= vma->vm_start)
17601 + if (check_heap_stack_gap(vma, tmp_addr, len))
17602 /* remember the address as a hint for next time */
17603 return mm->free_area_cache = tmp_addr;
17604 }
17605 @@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17606 * return with success:
17607 */
17608 vma = find_vma(mm, addr);
17609 - if (!vma || addr+len <= vma->vm_start)
17610 + if (check_heap_stack_gap(vma, addr, len))
17611 /* remember the address as a hint for next time */
17612 return mm->free_area_cache = addr;
17613
17614 @@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17615 mm->cached_hole_size = vma->vm_start - addr;
17616
17617 /* try just below the current vma->vm_start */
17618 - addr = vma->vm_start-len;
17619 - } while (len < vma->vm_start);
17620 + addr = skip_heap_stack_gap(vma, len);
17621 + } while (!IS_ERR_VALUE(addr));
17622
17623 bottomup:
17624 /*
17625 @@ -270,13 +278,21 @@ bottomup:
17626 * can happen with large stack limits and large mmap()
17627 * allocations.
17628 */
17629 + mm->mmap_base = TASK_UNMAPPED_BASE;
17630 +
17631 +#ifdef CONFIG_PAX_RANDMMAP
17632 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17633 + mm->mmap_base += mm->delta_mmap;
17634 +#endif
17635 +
17636 + mm->free_area_cache = mm->mmap_base;
17637 mm->cached_hole_size = ~0UL;
17638 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17639 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17640 /*
17641 * Restore the topdown base:
17642 */
17643 - mm->free_area_cache = mm->mmap_base;
17644 + mm->mmap_base = base;
17645 + mm->free_area_cache = base;
17646 mm->cached_hole_size = ~0UL;
17647
17648 return addr;
17649 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17650 index 9a0e312..e6f66f2 100644
17651 --- a/arch/x86/kernel/syscall_table_32.S
17652 +++ b/arch/x86/kernel/syscall_table_32.S
17653 @@ -1,3 +1,4 @@
17654 +.section .rodata,"a",@progbits
17655 ENTRY(sys_call_table)
17656 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17657 .long sys_exit
17658 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17659 index e2410e2..4fe3fbc 100644
17660 --- a/arch/x86/kernel/tboot.c
17661 +++ b/arch/x86/kernel/tboot.c
17662 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
17663
17664 void tboot_shutdown(u32 shutdown_type)
17665 {
17666 - void (*shutdown)(void);
17667 + void (* __noreturn shutdown)(void);
17668
17669 if (!tboot_enabled())
17670 return;
17671 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
17672
17673 switch_to_tboot_pt();
17674
17675 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17676 + shutdown = (void *)tboot->shutdown_entry;
17677 shutdown();
17678
17679 /* should not reach here */
17680 @@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17681 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17682 }
17683
17684 -static atomic_t ap_wfs_count;
17685 +static atomic_unchecked_t ap_wfs_count;
17686
17687 static int tboot_wait_for_aps(int num_aps)
17688 {
17689 @@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17690 {
17691 switch (action) {
17692 case CPU_DYING:
17693 - atomic_inc(&ap_wfs_count);
17694 + atomic_inc_unchecked(&ap_wfs_count);
17695 if (num_online_cpus() == 1)
17696 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17697 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17698 return NOTIFY_BAD;
17699 break;
17700 }
17701 @@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
17702
17703 tboot_create_trampoline();
17704
17705 - atomic_set(&ap_wfs_count, 0);
17706 + atomic_set_unchecked(&ap_wfs_count, 0);
17707 register_hotcpu_notifier(&tboot_cpu_notifier);
17708 return 0;
17709 }
17710 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17711 index dd5fbf4..b7f2232 100644
17712 --- a/arch/x86/kernel/time.c
17713 +++ b/arch/x86/kernel/time.c
17714 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17715 {
17716 unsigned long pc = instruction_pointer(regs);
17717
17718 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17719 + if (!user_mode(regs) && in_lock_functions(pc)) {
17720 #ifdef CONFIG_FRAME_POINTER
17721 - return *(unsigned long *)(regs->bp + sizeof(long));
17722 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17723 #else
17724 unsigned long *sp =
17725 (unsigned long *)kernel_stack_pointer(regs);
17726 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17727 * or above a saved flags. Eflags has bits 22-31 zero,
17728 * kernel addresses don't.
17729 */
17730 +
17731 +#ifdef CONFIG_PAX_KERNEXEC
17732 + return ktla_ktva(sp[0]);
17733 +#else
17734 if (sp[0] >> 22)
17735 return sp[0];
17736 if (sp[1] >> 22)
17737 return sp[1];
17738 #endif
17739 +
17740 +#endif
17741 }
17742 return pc;
17743 }
17744 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17745 index 6bb7b85..dd853e1 100644
17746 --- a/arch/x86/kernel/tls.c
17747 +++ b/arch/x86/kernel/tls.c
17748 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17749 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17750 return -EINVAL;
17751
17752 +#ifdef CONFIG_PAX_SEGMEXEC
17753 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17754 + return -EINVAL;
17755 +#endif
17756 +
17757 set_tls_desc(p, idx, &info, 1);
17758
17759 return 0;
17760 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17761 index 451c0a7..e57f551 100644
17762 --- a/arch/x86/kernel/trampoline_32.S
17763 +++ b/arch/x86/kernel/trampoline_32.S
17764 @@ -32,6 +32,12 @@
17765 #include <asm/segment.h>
17766 #include <asm/page_types.h>
17767
17768 +#ifdef CONFIG_PAX_KERNEXEC
17769 +#define ta(X) (X)
17770 +#else
17771 +#define ta(X) ((X) - __PAGE_OFFSET)
17772 +#endif
17773 +
17774 #ifdef CONFIG_SMP
17775
17776 .section ".x86_trampoline","a"
17777 @@ -62,7 +68,7 @@ r_base = .
17778 inc %ax # protected mode (PE) bit
17779 lmsw %ax # into protected mode
17780 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17781 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17782 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17783
17784 # These need to be in the same 64K segment as the above;
17785 # hence we don't use the boot_gdt_descr defined in head.S
17786 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17787 index 09ff517..df19fbff 100644
17788 --- a/arch/x86/kernel/trampoline_64.S
17789 +++ b/arch/x86/kernel/trampoline_64.S
17790 @@ -90,7 +90,7 @@ startup_32:
17791 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17792 movl %eax, %ds
17793
17794 - movl $X86_CR4_PAE, %eax
17795 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17796 movl %eax, %cr4 # Enable PAE mode
17797
17798 # Setup trampoline 4 level pagetables
17799 @@ -138,7 +138,7 @@ tidt:
17800 # so the kernel can live anywhere
17801 .balign 4
17802 tgdt:
17803 - .short tgdt_end - tgdt # gdt limit
17804 + .short tgdt_end - tgdt - 1 # gdt limit
17805 .long tgdt - r_base
17806 .short 0
17807 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17808 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17809 index 31d9d0f..e244dd9 100644
17810 --- a/arch/x86/kernel/traps.c
17811 +++ b/arch/x86/kernel/traps.c
17812 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17813
17814 /* Do we ignore FPU interrupts ? */
17815 char ignore_fpu_irq;
17816 -
17817 -/*
17818 - * The IDT has to be page-aligned to simplify the Pentium
17819 - * F0 0F bug workaround.
17820 - */
17821 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17822 #endif
17823
17824 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17825 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17826 }
17827
17828 static void __kprobes
17829 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17830 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17831 long error_code, siginfo_t *info)
17832 {
17833 struct task_struct *tsk = current;
17834
17835 #ifdef CONFIG_X86_32
17836 - if (regs->flags & X86_VM_MASK) {
17837 + if (v8086_mode(regs)) {
17838 /*
17839 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17840 * On nmi (interrupt 2), do_trap should not be called.
17841 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17842 }
17843 #endif
17844
17845 - if (!user_mode(regs))
17846 + if (!user_mode_novm(regs))
17847 goto kernel_trap;
17848
17849 #ifdef CONFIG_X86_32
17850 @@ -148,7 +142,7 @@ trap_signal:
17851 printk_ratelimit()) {
17852 printk(KERN_INFO
17853 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17854 - tsk->comm, tsk->pid, str,
17855 + tsk->comm, task_pid_nr(tsk), str,
17856 regs->ip, regs->sp, error_code);
17857 print_vma_addr(" in ", regs->ip);
17858 printk("\n");
17859 @@ -165,8 +159,20 @@ kernel_trap:
17860 if (!fixup_exception(regs)) {
17861 tsk->thread.error_code = error_code;
17862 tsk->thread.trap_no = trapnr;
17863 +
17864 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17865 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17866 + str = "PAX: suspicious stack segment fault";
17867 +#endif
17868 +
17869 die(str, regs, error_code);
17870 }
17871 +
17872 +#ifdef CONFIG_PAX_REFCOUNT
17873 + if (trapnr == 4)
17874 + pax_report_refcount_overflow(regs);
17875 +#endif
17876 +
17877 return;
17878
17879 #ifdef CONFIG_X86_32
17880 @@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17881 conditional_sti(regs);
17882
17883 #ifdef CONFIG_X86_32
17884 - if (regs->flags & X86_VM_MASK)
17885 + if (v8086_mode(regs))
17886 goto gp_in_vm86;
17887 #endif
17888
17889 tsk = current;
17890 - if (!user_mode(regs))
17891 + if (!user_mode_novm(regs))
17892 goto gp_in_kernel;
17893
17894 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17895 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17896 + struct mm_struct *mm = tsk->mm;
17897 + unsigned long limit;
17898 +
17899 + down_write(&mm->mmap_sem);
17900 + limit = mm->context.user_cs_limit;
17901 + if (limit < TASK_SIZE) {
17902 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17903 + up_write(&mm->mmap_sem);
17904 + return;
17905 + }
17906 + up_write(&mm->mmap_sem);
17907 + }
17908 +#endif
17909 +
17910 tsk->thread.error_code = error_code;
17911 tsk->thread.trap_no = 13;
17912
17913 @@ -295,6 +317,13 @@ gp_in_kernel:
17914 if (notify_die(DIE_GPF, "general protection fault", regs,
17915 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17916 return;
17917 +
17918 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17919 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17920 + die("PAX: suspicious general protection fault", regs, error_code);
17921 + else
17922 +#endif
17923 +
17924 die("general protection fault", regs, error_code);
17925 }
17926
17927 @@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17928 /* It's safe to allow irq's after DR6 has been saved */
17929 preempt_conditional_sti(regs);
17930
17931 - if (regs->flags & X86_VM_MASK) {
17932 + if (v8086_mode(regs)) {
17933 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17934 error_code, 1);
17935 preempt_conditional_cli(regs);
17936 @@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17937 * We already checked v86 mode above, so we can check for kernel mode
17938 * by just checking the CPL of CS.
17939 */
17940 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
17941 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17942 tsk->thread.debugreg6 &= ~DR_STEP;
17943 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17944 regs->flags &= ~X86_EFLAGS_TF;
17945 @@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17946 return;
17947 conditional_sti(regs);
17948
17949 - if (!user_mode_vm(regs))
17950 + if (!user_mode(regs))
17951 {
17952 if (!fixup_exception(regs)) {
17953 task->thread.error_code = error_code;
17954 @@ -569,8 +598,8 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17955 void __math_state_restore(struct task_struct *tsk)
17956 {
17957 /* We need a safe address that is cheap to find and that is already
17958 - in L1. We've just brought in "tsk->thread.has_fpu", so use that */
17959 -#define safe_address (tsk->thread.has_fpu)
17960 + in L1. */
17961 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
17962
17963 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
17964 is pending. Clear the x87 state here by setting it to fixed
17965 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17966 index b9242ba..50c5edd 100644
17967 --- a/arch/x86/kernel/verify_cpu.S
17968 +++ b/arch/x86/kernel/verify_cpu.S
17969 @@ -20,6 +20,7 @@
17970 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17971 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17972 * arch/x86/kernel/head_32.S: processor startup
17973 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17974 *
17975 * verify_cpu, returns the status of longmode and SSE in register %eax.
17976 * 0: Success 1: Failure
17977 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17978 index 863f875..4307295 100644
17979 --- a/arch/x86/kernel/vm86_32.c
17980 +++ b/arch/x86/kernel/vm86_32.c
17981 @@ -41,6 +41,7 @@
17982 #include <linux/ptrace.h>
17983 #include <linux/audit.h>
17984 #include <linux/stddef.h>
17985 +#include <linux/grsecurity.h>
17986
17987 #include <asm/uaccess.h>
17988 #include <asm/io.h>
17989 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17990 do_exit(SIGSEGV);
17991 }
17992
17993 - tss = &per_cpu(init_tss, get_cpu());
17994 + tss = init_tss + get_cpu();
17995 current->thread.sp0 = current->thread.saved_sp0;
17996 current->thread.sysenter_cs = __KERNEL_CS;
17997 load_sp0(tss, &current->thread);
17998 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17999 struct task_struct *tsk;
18000 int tmp, ret = -EPERM;
18001
18002 +#ifdef CONFIG_GRKERNSEC_VM86
18003 + if (!capable(CAP_SYS_RAWIO)) {
18004 + gr_handle_vm86();
18005 + goto out;
18006 + }
18007 +#endif
18008 +
18009 tsk = current;
18010 if (tsk->thread.saved_sp0)
18011 goto out;
18012 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
18013 int tmp, ret;
18014 struct vm86plus_struct __user *v86;
18015
18016 +#ifdef CONFIG_GRKERNSEC_VM86
18017 + if (!capable(CAP_SYS_RAWIO)) {
18018 + gr_handle_vm86();
18019 + ret = -EPERM;
18020 + goto out;
18021 + }
18022 +#endif
18023 +
18024 tsk = current;
18025 switch (cmd) {
18026 case VM86_REQUEST_IRQ:
18027 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
18028 tsk->thread.saved_fs = info->regs32->fs;
18029 tsk->thread.saved_gs = get_user_gs(info->regs32);
18030
18031 - tss = &per_cpu(init_tss, get_cpu());
18032 + tss = init_tss + get_cpu();
18033 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
18034 if (cpu_has_sep)
18035 tsk->thread.sysenter_cs = 0;
18036 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
18037 goto cannot_handle;
18038 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
18039 goto cannot_handle;
18040 - intr_ptr = (unsigned long __user *) (i << 2);
18041 + intr_ptr = (__force unsigned long __user *) (i << 2);
18042 if (get_user(segoffs, intr_ptr))
18043 goto cannot_handle;
18044 if ((segoffs >> 16) == BIOSSEG)
18045 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
18046 index 0f703f1..9e15f64 100644
18047 --- a/arch/x86/kernel/vmlinux.lds.S
18048 +++ b/arch/x86/kernel/vmlinux.lds.S
18049 @@ -26,6 +26,13 @@
18050 #include <asm/page_types.h>
18051 #include <asm/cache.h>
18052 #include <asm/boot.h>
18053 +#include <asm/segment.h>
18054 +
18055 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18056 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18057 +#else
18058 +#define __KERNEL_TEXT_OFFSET 0
18059 +#endif
18060
18061 #undef i386 /* in case the preprocessor is a 32bit one */
18062
18063 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
18064
18065 PHDRS {
18066 text PT_LOAD FLAGS(5); /* R_E */
18067 +#ifdef CONFIG_X86_32
18068 + module PT_LOAD FLAGS(5); /* R_E */
18069 +#endif
18070 +#ifdef CONFIG_XEN
18071 + rodata PT_LOAD FLAGS(5); /* R_E */
18072 +#else
18073 + rodata PT_LOAD FLAGS(4); /* R__ */
18074 +#endif
18075 data PT_LOAD FLAGS(6); /* RW_ */
18076 -#ifdef CONFIG_X86_64
18077 + init.begin PT_LOAD FLAGS(6); /* RW_ */
18078 #ifdef CONFIG_SMP
18079 percpu PT_LOAD FLAGS(6); /* RW_ */
18080 #endif
18081 + text.init PT_LOAD FLAGS(5); /* R_E */
18082 + text.exit PT_LOAD FLAGS(5); /* R_E */
18083 init PT_LOAD FLAGS(7); /* RWE */
18084 -#endif
18085 note PT_NOTE FLAGS(0); /* ___ */
18086 }
18087
18088 SECTIONS
18089 {
18090 #ifdef CONFIG_X86_32
18091 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18092 - phys_startup_32 = startup_32 - LOAD_OFFSET;
18093 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18094 #else
18095 - . = __START_KERNEL;
18096 - phys_startup_64 = startup_64 - LOAD_OFFSET;
18097 + . = __START_KERNEL;
18098 #endif
18099
18100 /* Text and read-only data */
18101 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
18102 - _text = .;
18103 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18104 /* bootstrapping code */
18105 +#ifdef CONFIG_X86_32
18106 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18107 +#else
18108 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18109 +#endif
18110 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18111 + _text = .;
18112 HEAD_TEXT
18113 #ifdef CONFIG_X86_32
18114 . = ALIGN(PAGE_SIZE);
18115 @@ -108,13 +128,47 @@ SECTIONS
18116 IRQENTRY_TEXT
18117 *(.fixup)
18118 *(.gnu.warning)
18119 - /* End of text section */
18120 - _etext = .;
18121 } :text = 0x9090
18122
18123 - NOTES :text :note
18124 + . += __KERNEL_TEXT_OFFSET;
18125
18126 - EXCEPTION_TABLE(16) :text = 0x9090
18127 +#ifdef CONFIG_X86_32
18128 + . = ALIGN(PAGE_SIZE);
18129 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18130 +
18131 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18132 + MODULES_EXEC_VADDR = .;
18133 + BYTE(0)
18134 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18135 + . = ALIGN(HPAGE_SIZE);
18136 + MODULES_EXEC_END = . - 1;
18137 +#endif
18138 +
18139 + } :module
18140 +#endif
18141 +
18142 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18143 + /* End of text section */
18144 + _etext = . - __KERNEL_TEXT_OFFSET;
18145 + }
18146 +
18147 +#ifdef CONFIG_X86_32
18148 + . = ALIGN(PAGE_SIZE);
18149 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18150 + *(.idt)
18151 + . = ALIGN(PAGE_SIZE);
18152 + *(.empty_zero_page)
18153 + *(.initial_pg_fixmap)
18154 + *(.initial_pg_pmd)
18155 + *(.initial_page_table)
18156 + *(.swapper_pg_dir)
18157 + } :rodata
18158 +#endif
18159 +
18160 + . = ALIGN(PAGE_SIZE);
18161 + NOTES :rodata :note
18162 +
18163 + EXCEPTION_TABLE(16) :rodata
18164
18165 #if defined(CONFIG_DEBUG_RODATA)
18166 /* .text should occupy whole number of pages */
18167 @@ -126,16 +180,20 @@ SECTIONS
18168
18169 /* Data */
18170 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18171 +
18172 +#ifdef CONFIG_PAX_KERNEXEC
18173 + . = ALIGN(HPAGE_SIZE);
18174 +#else
18175 + . = ALIGN(PAGE_SIZE);
18176 +#endif
18177 +
18178 /* Start of data section */
18179 _sdata = .;
18180
18181 /* init_task */
18182 INIT_TASK_DATA(THREAD_SIZE)
18183
18184 -#ifdef CONFIG_X86_32
18185 - /* 32 bit has nosave before _edata */
18186 NOSAVE_DATA
18187 -#endif
18188
18189 PAGE_ALIGNED_DATA(PAGE_SIZE)
18190
18191 @@ -176,12 +234,19 @@ SECTIONS
18192 #endif /* CONFIG_X86_64 */
18193
18194 /* Init code and data - will be freed after init */
18195 - . = ALIGN(PAGE_SIZE);
18196 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18197 + BYTE(0)
18198 +
18199 +#ifdef CONFIG_PAX_KERNEXEC
18200 + . = ALIGN(HPAGE_SIZE);
18201 +#else
18202 + . = ALIGN(PAGE_SIZE);
18203 +#endif
18204 +
18205 __init_begin = .; /* paired with __init_end */
18206 - }
18207 + } :init.begin
18208
18209 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18210 +#ifdef CONFIG_SMP
18211 /*
18212 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18213 * output PHDR, so the next output section - .init.text - should
18214 @@ -190,12 +255,27 @@ SECTIONS
18215 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
18216 #endif
18217
18218 - INIT_TEXT_SECTION(PAGE_SIZE)
18219 -#ifdef CONFIG_X86_64
18220 - :init
18221 -#endif
18222 + . = ALIGN(PAGE_SIZE);
18223 + init_begin = .;
18224 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18225 + VMLINUX_SYMBOL(_sinittext) = .;
18226 + INIT_TEXT
18227 + VMLINUX_SYMBOL(_einittext) = .;
18228 + . = ALIGN(PAGE_SIZE);
18229 + } :text.init
18230
18231 - INIT_DATA_SECTION(16)
18232 + /*
18233 + * .exit.text is discard at runtime, not link time, to deal with
18234 + * references from .altinstructions and .eh_frame
18235 + */
18236 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18237 + EXIT_TEXT
18238 + . = ALIGN(16);
18239 + } :text.exit
18240 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18241 +
18242 + . = ALIGN(PAGE_SIZE);
18243 + INIT_DATA_SECTION(16) :init
18244
18245 /*
18246 * Code and data for a variety of lowlevel trampolines, to be
18247 @@ -269,19 +349,12 @@ SECTIONS
18248 }
18249
18250 . = ALIGN(8);
18251 - /*
18252 - * .exit.text is discard at runtime, not link time, to deal with
18253 - * references from .altinstructions and .eh_frame
18254 - */
18255 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18256 - EXIT_TEXT
18257 - }
18258
18259 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18260 EXIT_DATA
18261 }
18262
18263 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18264 +#ifndef CONFIG_SMP
18265 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
18266 #endif
18267
18268 @@ -300,16 +373,10 @@ SECTIONS
18269 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
18270 __smp_locks = .;
18271 *(.smp_locks)
18272 - . = ALIGN(PAGE_SIZE);
18273 __smp_locks_end = .;
18274 + . = ALIGN(PAGE_SIZE);
18275 }
18276
18277 -#ifdef CONFIG_X86_64
18278 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18279 - NOSAVE_DATA
18280 - }
18281 -#endif
18282 -
18283 /* BSS */
18284 . = ALIGN(PAGE_SIZE);
18285 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18286 @@ -325,6 +392,7 @@ SECTIONS
18287 __brk_base = .;
18288 . += 64 * 1024; /* 64k alignment slop space */
18289 *(.brk_reservation) /* areas brk users have reserved */
18290 + . = ALIGN(HPAGE_SIZE);
18291 __brk_limit = .;
18292 }
18293
18294 @@ -351,13 +419,12 @@ SECTIONS
18295 * for the boot processor.
18296 */
18297 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18298 -INIT_PER_CPU(gdt_page);
18299 INIT_PER_CPU(irq_stack_union);
18300
18301 /*
18302 * Build-time check on the image size:
18303 */
18304 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18305 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18306 "kernel image bigger than KERNEL_IMAGE_SIZE");
18307
18308 #ifdef CONFIG_SMP
18309 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18310 index e4d4a22..47ee71f 100644
18311 --- a/arch/x86/kernel/vsyscall_64.c
18312 +++ b/arch/x86/kernel/vsyscall_64.c
18313 @@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18314 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18315 };
18316
18317 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18318 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18319
18320 static int __init vsyscall_setup(char *str)
18321 {
18322 if (str) {
18323 if (!strcmp("emulate", str))
18324 vsyscall_mode = EMULATE;
18325 - else if (!strcmp("native", str))
18326 - vsyscall_mode = NATIVE;
18327 else if (!strcmp("none", str))
18328 vsyscall_mode = NONE;
18329 else
18330 @@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18331
18332 tsk = current;
18333 if (seccomp_mode(&tsk->seccomp))
18334 - do_exit(SIGKILL);
18335 + do_group_exit(SIGKILL);
18336
18337 switch (vsyscall_nr) {
18338 case 0:
18339 @@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18340 return true;
18341
18342 sigsegv:
18343 - force_sig(SIGSEGV, current);
18344 - return true;
18345 + do_group_exit(SIGKILL);
18346 }
18347
18348 /*
18349 @@ -274,10 +271,7 @@ void __init map_vsyscall(void)
18350 extern char __vvar_page;
18351 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18352
18353 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18354 - vsyscall_mode == NATIVE
18355 - ? PAGE_KERNEL_VSYSCALL
18356 - : PAGE_KERNEL_VVAR);
18357 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18358 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18359 (unsigned long)VSYSCALL_START);
18360
18361 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18362 index 9796c2f..f686fbf 100644
18363 --- a/arch/x86/kernel/x8664_ksyms_64.c
18364 +++ b/arch/x86/kernel/x8664_ksyms_64.c
18365 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18366 EXPORT_SYMBOL(copy_user_generic_string);
18367 EXPORT_SYMBOL(copy_user_generic_unrolled);
18368 EXPORT_SYMBOL(__copy_user_nocache);
18369 -EXPORT_SYMBOL(_copy_from_user);
18370 -EXPORT_SYMBOL(_copy_to_user);
18371
18372 EXPORT_SYMBOL(copy_page);
18373 EXPORT_SYMBOL(clear_page);
18374 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18375 index 7110911..e8cdee5 100644
18376 --- a/arch/x86/kernel/xsave.c
18377 +++ b/arch/x86/kernel/xsave.c
18378 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18379 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18380 return -EINVAL;
18381
18382 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18383 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18384 fx_sw_user->extended_size -
18385 FP_XSTATE_MAGIC2_SIZE));
18386 if (err)
18387 @@ -266,7 +266,7 @@ fx_only:
18388 * the other extended state.
18389 */
18390 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18391 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18392 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18393 }
18394
18395 /*
18396 @@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf)
18397 if (use_xsave())
18398 err = restore_user_xstate(buf);
18399 else
18400 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18401 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18402 buf);
18403 if (unlikely(err)) {
18404 /*
18405 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18406 index f1e3be1..588efc8 100644
18407 --- a/arch/x86/kvm/emulate.c
18408 +++ b/arch/x86/kvm/emulate.c
18409 @@ -249,6 +249,7 @@ struct gprefix {
18410
18411 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
18412 do { \
18413 + unsigned long _tmp; \
18414 __asm__ __volatile__ ( \
18415 _PRE_EFLAGS("0", "4", "2") \
18416 _op _suffix " %"_x"3,%1; " \
18417 @@ -263,8 +264,6 @@ struct gprefix {
18418 /* Raw emulation: instruction has two explicit operands. */
18419 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
18420 do { \
18421 - unsigned long _tmp; \
18422 - \
18423 switch ((ctxt)->dst.bytes) { \
18424 case 2: \
18425 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
18426 @@ -280,7 +279,6 @@ struct gprefix {
18427
18428 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18429 do { \
18430 - unsigned long _tmp; \
18431 switch ((ctxt)->dst.bytes) { \
18432 case 1: \
18433 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
18434 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18435 index 54abb40..a192606 100644
18436 --- a/arch/x86/kvm/lapic.c
18437 +++ b/arch/x86/kvm/lapic.c
18438 @@ -53,7 +53,7 @@
18439 #define APIC_BUS_CYCLE_NS 1
18440
18441 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18442 -#define apic_debug(fmt, arg...)
18443 +#define apic_debug(fmt, arg...) do {} while (0)
18444
18445 #define APIC_LVT_NUM 6
18446 /* 14 is the version for Xeon and Pentium 8.4.8*/
18447 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18448 index f1b36cf..af8a124 100644
18449 --- a/arch/x86/kvm/mmu.c
18450 +++ b/arch/x86/kvm/mmu.c
18451 @@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18452
18453 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18454
18455 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18456 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18457
18458 /*
18459 * Assume that the pte write on a page table of the same type
18460 @@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18461 }
18462
18463 spin_lock(&vcpu->kvm->mmu_lock);
18464 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18465 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18466 gentry = 0;
18467 kvm_mmu_free_some_pages(vcpu);
18468 ++vcpu->kvm->stat.mmu_pte_write;
18469 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18470 index 9299410..ade2f9b 100644
18471 --- a/arch/x86/kvm/paging_tmpl.h
18472 +++ b/arch/x86/kvm/paging_tmpl.h
18473 @@ -197,7 +197,7 @@ retry_walk:
18474 if (unlikely(kvm_is_error_hva(host_addr)))
18475 goto error;
18476
18477 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18478 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18479 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18480 goto error;
18481
18482 @@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18483 if (need_flush)
18484 kvm_flush_remote_tlbs(vcpu->kvm);
18485
18486 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18487 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18488
18489 spin_unlock(&vcpu->kvm->mmu_lock);
18490
18491 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18492 index e32243e..a6e6172 100644
18493 --- a/arch/x86/kvm/svm.c
18494 +++ b/arch/x86/kvm/svm.c
18495 @@ -3400,7 +3400,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18496 int cpu = raw_smp_processor_id();
18497
18498 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18499 +
18500 + pax_open_kernel();
18501 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18502 + pax_close_kernel();
18503 +
18504 load_TR_desc();
18505 }
18506
18507 @@ -3778,6 +3782,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18508 #endif
18509 #endif
18510
18511 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18512 + __set_fs(current_thread_info()->addr_limit);
18513 +#endif
18514 +
18515 reload_tss(vcpu);
18516
18517 local_irq_disable();
18518 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18519 index 4ea7678..b3a7084 100644
18520 --- a/arch/x86/kvm/vmx.c
18521 +++ b/arch/x86/kvm/vmx.c
18522 @@ -1305,7 +1305,11 @@ static void reload_tss(void)
18523 struct desc_struct *descs;
18524
18525 descs = (void *)gdt->address;
18526 +
18527 + pax_open_kernel();
18528 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18529 + pax_close_kernel();
18530 +
18531 load_TR_desc();
18532 }
18533
18534 @@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
18535 if (!cpu_has_vmx_flexpriority())
18536 flexpriority_enabled = 0;
18537
18538 - if (!cpu_has_vmx_tpr_shadow())
18539 - kvm_x86_ops->update_cr8_intercept = NULL;
18540 + if (!cpu_has_vmx_tpr_shadow()) {
18541 + pax_open_kernel();
18542 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18543 + pax_close_kernel();
18544 + }
18545
18546 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18547 kvm_disable_largepages();
18548 @@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
18549 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18550
18551 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18552 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18553 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18554
18555 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18556 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18557 @@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18558 "jmp .Lkvm_vmx_return \n\t"
18559 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18560 ".Lkvm_vmx_return: "
18561 +
18562 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18563 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18564 + ".Lkvm_vmx_return2: "
18565 +#endif
18566 +
18567 /* Save guest registers, load host registers, keep flags */
18568 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18569 "pop %0 \n\t"
18570 @@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18571 #endif
18572 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18573 [wordsize]"i"(sizeof(ulong))
18574 +
18575 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18576 + ,[cs]"i"(__KERNEL_CS)
18577 +#endif
18578 +
18579 : "cc", "memory"
18580 , R"ax", R"bx", R"di", R"si"
18581 #ifdef CONFIG_X86_64
18582 @@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18583 }
18584 }
18585
18586 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18587 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18588 +
18589 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18590 + loadsegment(fs, __KERNEL_PERCPU);
18591 +#endif
18592 +
18593 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18594 + __set_fs(current_thread_info()->addr_limit);
18595 +#endif
18596 +
18597 vmx->loaded_vmcs->launched = 1;
18598
18599 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18600 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18601 index 4c938da..4ddef65 100644
18602 --- a/arch/x86/kvm/x86.c
18603 +++ b/arch/x86/kvm/x86.c
18604 @@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18605 {
18606 struct kvm *kvm = vcpu->kvm;
18607 int lm = is_long_mode(vcpu);
18608 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18609 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18610 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18611 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18612 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18613 : kvm->arch.xen_hvm_config.blob_size_32;
18614 u32 page_num = data & ~PAGE_MASK;
18615 @@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18616 if (n < msr_list.nmsrs)
18617 goto out;
18618 r = -EFAULT;
18619 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18620 + goto out;
18621 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18622 num_msrs_to_save * sizeof(u32)))
18623 goto out;
18624 @@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18625 struct kvm_cpuid2 *cpuid,
18626 struct kvm_cpuid_entry2 __user *entries)
18627 {
18628 - int r;
18629 + int r, i;
18630
18631 r = -E2BIG;
18632 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18633 goto out;
18634 r = -EFAULT;
18635 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18636 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18637 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18638 goto out;
18639 + for (i = 0; i < cpuid->nent; ++i) {
18640 + struct kvm_cpuid_entry2 cpuid_entry;
18641 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18642 + goto out;
18643 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18644 + }
18645 vcpu->arch.cpuid_nent = cpuid->nent;
18646 kvm_apic_set_version(vcpu);
18647 kvm_x86_ops->cpuid_update(vcpu);
18648 @@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18649 struct kvm_cpuid2 *cpuid,
18650 struct kvm_cpuid_entry2 __user *entries)
18651 {
18652 - int r;
18653 + int r, i;
18654
18655 r = -E2BIG;
18656 if (cpuid->nent < vcpu->arch.cpuid_nent)
18657 goto out;
18658 r = -EFAULT;
18659 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18660 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18661 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18662 goto out;
18663 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18664 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18665 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18666 + goto out;
18667 + }
18668 return 0;
18669
18670 out:
18671 @@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18672 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18673 struct kvm_interrupt *irq)
18674 {
18675 - if (irq->irq < 0 || irq->irq >= 256)
18676 + if (irq->irq >= 256)
18677 return -EINVAL;
18678 if (irqchip_in_kernel(vcpu->kvm))
18679 return -ENXIO;
18680 @@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
18681 kvm_mmu_set_mmio_spte_mask(mask);
18682 }
18683
18684 -int kvm_arch_init(void *opaque)
18685 +int kvm_arch_init(const void *opaque)
18686 {
18687 int r;
18688 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18689 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18690 index cf4603b..7cdde38 100644
18691 --- a/arch/x86/lguest/boot.c
18692 +++ b/arch/x86/lguest/boot.c
18693 @@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18694 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18695 * Launcher to reboot us.
18696 */
18697 -static void lguest_restart(char *reason)
18698 +static __noreturn void lguest_restart(char *reason)
18699 {
18700 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18701 + BUG();
18702 }
18703
18704 /*G:050
18705 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18706 index 042f682..c92afb6 100644
18707 --- a/arch/x86/lib/atomic64_32.c
18708 +++ b/arch/x86/lib/atomic64_32.c
18709 @@ -8,18 +8,30 @@
18710
18711 long long atomic64_read_cx8(long long, const atomic64_t *v);
18712 EXPORT_SYMBOL(atomic64_read_cx8);
18713 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18714 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18715 long long atomic64_set_cx8(long long, const atomic64_t *v);
18716 EXPORT_SYMBOL(atomic64_set_cx8);
18717 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18718 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18719 long long atomic64_xchg_cx8(long long, unsigned high);
18720 EXPORT_SYMBOL(atomic64_xchg_cx8);
18721 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18722 EXPORT_SYMBOL(atomic64_add_return_cx8);
18723 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18724 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18725 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18726 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18727 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18728 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18729 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18730 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18731 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18732 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18733 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18734 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18735 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18736 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18737 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18738 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18739 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18740 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18741 #ifndef CONFIG_X86_CMPXCHG64
18742 long long atomic64_read_386(long long, const atomic64_t *v);
18743 EXPORT_SYMBOL(atomic64_read_386);
18744 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18745 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
18746 long long atomic64_set_386(long long, const atomic64_t *v);
18747 EXPORT_SYMBOL(atomic64_set_386);
18748 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18749 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
18750 long long atomic64_xchg_386(long long, unsigned high);
18751 EXPORT_SYMBOL(atomic64_xchg_386);
18752 long long atomic64_add_return_386(long long a, atomic64_t *v);
18753 EXPORT_SYMBOL(atomic64_add_return_386);
18754 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18755 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18756 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18757 EXPORT_SYMBOL(atomic64_sub_return_386);
18758 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18759 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18760 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18761 EXPORT_SYMBOL(atomic64_inc_return_386);
18762 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18763 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18764 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18765 EXPORT_SYMBOL(atomic64_dec_return_386);
18766 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18767 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18768 long long atomic64_add_386(long long a, atomic64_t *v);
18769 EXPORT_SYMBOL(atomic64_add_386);
18770 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18771 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
18772 long long atomic64_sub_386(long long a, atomic64_t *v);
18773 EXPORT_SYMBOL(atomic64_sub_386);
18774 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18775 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18776 long long atomic64_inc_386(long long a, atomic64_t *v);
18777 EXPORT_SYMBOL(atomic64_inc_386);
18778 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18779 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18780 long long atomic64_dec_386(long long a, atomic64_t *v);
18781 EXPORT_SYMBOL(atomic64_dec_386);
18782 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18783 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18784 long long atomic64_dec_if_positive_386(atomic64_t *v);
18785 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18786 int atomic64_inc_not_zero_386(atomic64_t *v);
18787 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18788 index e8e7e0d..56fd1b0 100644
18789 --- a/arch/x86/lib/atomic64_386_32.S
18790 +++ b/arch/x86/lib/atomic64_386_32.S
18791 @@ -48,6 +48,10 @@ BEGIN(read)
18792 movl (v), %eax
18793 movl 4(v), %edx
18794 RET_ENDP
18795 +BEGIN(read_unchecked)
18796 + movl (v), %eax
18797 + movl 4(v), %edx
18798 +RET_ENDP
18799 #undef v
18800
18801 #define v %esi
18802 @@ -55,6 +59,10 @@ BEGIN(set)
18803 movl %ebx, (v)
18804 movl %ecx, 4(v)
18805 RET_ENDP
18806 +BEGIN(set_unchecked)
18807 + movl %ebx, (v)
18808 + movl %ecx, 4(v)
18809 +RET_ENDP
18810 #undef v
18811
18812 #define v %esi
18813 @@ -70,6 +78,20 @@ RET_ENDP
18814 BEGIN(add)
18815 addl %eax, (v)
18816 adcl %edx, 4(v)
18817 +
18818 +#ifdef CONFIG_PAX_REFCOUNT
18819 + jno 0f
18820 + subl %eax, (v)
18821 + sbbl %edx, 4(v)
18822 + int $4
18823 +0:
18824 + _ASM_EXTABLE(0b, 0b)
18825 +#endif
18826 +
18827 +RET_ENDP
18828 +BEGIN(add_unchecked)
18829 + addl %eax, (v)
18830 + adcl %edx, 4(v)
18831 RET_ENDP
18832 #undef v
18833
18834 @@ -77,6 +99,24 @@ RET_ENDP
18835 BEGIN(add_return)
18836 addl (v), %eax
18837 adcl 4(v), %edx
18838 +
18839 +#ifdef CONFIG_PAX_REFCOUNT
18840 + into
18841 +1234:
18842 + _ASM_EXTABLE(1234b, 2f)
18843 +#endif
18844 +
18845 + movl %eax, (v)
18846 + movl %edx, 4(v)
18847 +
18848 +#ifdef CONFIG_PAX_REFCOUNT
18849 +2:
18850 +#endif
18851 +
18852 +RET_ENDP
18853 +BEGIN(add_return_unchecked)
18854 + addl (v), %eax
18855 + adcl 4(v), %edx
18856 movl %eax, (v)
18857 movl %edx, 4(v)
18858 RET_ENDP
18859 @@ -86,6 +126,20 @@ RET_ENDP
18860 BEGIN(sub)
18861 subl %eax, (v)
18862 sbbl %edx, 4(v)
18863 +
18864 +#ifdef CONFIG_PAX_REFCOUNT
18865 + jno 0f
18866 + addl %eax, (v)
18867 + adcl %edx, 4(v)
18868 + int $4
18869 +0:
18870 + _ASM_EXTABLE(0b, 0b)
18871 +#endif
18872 +
18873 +RET_ENDP
18874 +BEGIN(sub_unchecked)
18875 + subl %eax, (v)
18876 + sbbl %edx, 4(v)
18877 RET_ENDP
18878 #undef v
18879
18880 @@ -96,6 +150,27 @@ BEGIN(sub_return)
18881 sbbl $0, %edx
18882 addl (v), %eax
18883 adcl 4(v), %edx
18884 +
18885 +#ifdef CONFIG_PAX_REFCOUNT
18886 + into
18887 +1234:
18888 + _ASM_EXTABLE(1234b, 2f)
18889 +#endif
18890 +
18891 + movl %eax, (v)
18892 + movl %edx, 4(v)
18893 +
18894 +#ifdef CONFIG_PAX_REFCOUNT
18895 +2:
18896 +#endif
18897 +
18898 +RET_ENDP
18899 +BEGIN(sub_return_unchecked)
18900 + negl %edx
18901 + negl %eax
18902 + sbbl $0, %edx
18903 + addl (v), %eax
18904 + adcl 4(v), %edx
18905 movl %eax, (v)
18906 movl %edx, 4(v)
18907 RET_ENDP
18908 @@ -105,6 +180,20 @@ RET_ENDP
18909 BEGIN(inc)
18910 addl $1, (v)
18911 adcl $0, 4(v)
18912 +
18913 +#ifdef CONFIG_PAX_REFCOUNT
18914 + jno 0f
18915 + subl $1, (v)
18916 + sbbl $0, 4(v)
18917 + int $4
18918 +0:
18919 + _ASM_EXTABLE(0b, 0b)
18920 +#endif
18921 +
18922 +RET_ENDP
18923 +BEGIN(inc_unchecked)
18924 + addl $1, (v)
18925 + adcl $0, 4(v)
18926 RET_ENDP
18927 #undef v
18928
18929 @@ -114,6 +203,26 @@ BEGIN(inc_return)
18930 movl 4(v), %edx
18931 addl $1, %eax
18932 adcl $0, %edx
18933 +
18934 +#ifdef CONFIG_PAX_REFCOUNT
18935 + into
18936 +1234:
18937 + _ASM_EXTABLE(1234b, 2f)
18938 +#endif
18939 +
18940 + movl %eax, (v)
18941 + movl %edx, 4(v)
18942 +
18943 +#ifdef CONFIG_PAX_REFCOUNT
18944 +2:
18945 +#endif
18946 +
18947 +RET_ENDP
18948 +BEGIN(inc_return_unchecked)
18949 + movl (v), %eax
18950 + movl 4(v), %edx
18951 + addl $1, %eax
18952 + adcl $0, %edx
18953 movl %eax, (v)
18954 movl %edx, 4(v)
18955 RET_ENDP
18956 @@ -123,6 +232,20 @@ RET_ENDP
18957 BEGIN(dec)
18958 subl $1, (v)
18959 sbbl $0, 4(v)
18960 +
18961 +#ifdef CONFIG_PAX_REFCOUNT
18962 + jno 0f
18963 + addl $1, (v)
18964 + adcl $0, 4(v)
18965 + int $4
18966 +0:
18967 + _ASM_EXTABLE(0b, 0b)
18968 +#endif
18969 +
18970 +RET_ENDP
18971 +BEGIN(dec_unchecked)
18972 + subl $1, (v)
18973 + sbbl $0, 4(v)
18974 RET_ENDP
18975 #undef v
18976
18977 @@ -132,6 +255,26 @@ BEGIN(dec_return)
18978 movl 4(v), %edx
18979 subl $1, %eax
18980 sbbl $0, %edx
18981 +
18982 +#ifdef CONFIG_PAX_REFCOUNT
18983 + into
18984 +1234:
18985 + _ASM_EXTABLE(1234b, 2f)
18986 +#endif
18987 +
18988 + movl %eax, (v)
18989 + movl %edx, 4(v)
18990 +
18991 +#ifdef CONFIG_PAX_REFCOUNT
18992 +2:
18993 +#endif
18994 +
18995 +RET_ENDP
18996 +BEGIN(dec_return_unchecked)
18997 + movl (v), %eax
18998 + movl 4(v), %edx
18999 + subl $1, %eax
19000 + sbbl $0, %edx
19001 movl %eax, (v)
19002 movl %edx, 4(v)
19003 RET_ENDP
19004 @@ -143,6 +286,13 @@ BEGIN(add_unless)
19005 adcl %edx, %edi
19006 addl (v), %eax
19007 adcl 4(v), %edx
19008 +
19009 +#ifdef CONFIG_PAX_REFCOUNT
19010 + into
19011 +1234:
19012 + _ASM_EXTABLE(1234b, 2f)
19013 +#endif
19014 +
19015 cmpl %eax, %esi
19016 je 3f
19017 1:
19018 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
19019 1:
19020 addl $1, %eax
19021 adcl $0, %edx
19022 +
19023 +#ifdef CONFIG_PAX_REFCOUNT
19024 + into
19025 +1234:
19026 + _ASM_EXTABLE(1234b, 2f)
19027 +#endif
19028 +
19029 movl %eax, (v)
19030 movl %edx, 4(v)
19031 movl $1, %eax
19032 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
19033 movl 4(v), %edx
19034 subl $1, %eax
19035 sbbl $0, %edx
19036 +
19037 +#ifdef CONFIG_PAX_REFCOUNT
19038 + into
19039 +1234:
19040 + _ASM_EXTABLE(1234b, 1f)
19041 +#endif
19042 +
19043 js 1f
19044 movl %eax, (v)
19045 movl %edx, 4(v)
19046 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
19047 index 391a083..d658e9f 100644
19048 --- a/arch/x86/lib/atomic64_cx8_32.S
19049 +++ b/arch/x86/lib/atomic64_cx8_32.S
19050 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
19051 CFI_STARTPROC
19052
19053 read64 %ecx
19054 + pax_force_retaddr
19055 ret
19056 CFI_ENDPROC
19057 ENDPROC(atomic64_read_cx8)
19058
19059 +ENTRY(atomic64_read_unchecked_cx8)
19060 + CFI_STARTPROC
19061 +
19062 + read64 %ecx
19063 + pax_force_retaddr
19064 + ret
19065 + CFI_ENDPROC
19066 +ENDPROC(atomic64_read_unchecked_cx8)
19067 +
19068 ENTRY(atomic64_set_cx8)
19069 CFI_STARTPROC
19070
19071 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
19072 cmpxchg8b (%esi)
19073 jne 1b
19074
19075 + pax_force_retaddr
19076 ret
19077 CFI_ENDPROC
19078 ENDPROC(atomic64_set_cx8)
19079
19080 +ENTRY(atomic64_set_unchecked_cx8)
19081 + CFI_STARTPROC
19082 +
19083 +1:
19084 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
19085 + * are atomic on 586 and newer */
19086 + cmpxchg8b (%esi)
19087 + jne 1b
19088 +
19089 + pax_force_retaddr
19090 + ret
19091 + CFI_ENDPROC
19092 +ENDPROC(atomic64_set_unchecked_cx8)
19093 +
19094 ENTRY(atomic64_xchg_cx8)
19095 CFI_STARTPROC
19096
19097 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
19098 cmpxchg8b (%esi)
19099 jne 1b
19100
19101 + pax_force_retaddr
19102 ret
19103 CFI_ENDPROC
19104 ENDPROC(atomic64_xchg_cx8)
19105
19106 -.macro addsub_return func ins insc
19107 -ENTRY(atomic64_\func\()_return_cx8)
19108 +.macro addsub_return func ins insc unchecked=""
19109 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
19110 CFI_STARTPROC
19111 SAVE ebp
19112 SAVE ebx
19113 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
19114 movl %edx, %ecx
19115 \ins\()l %esi, %ebx
19116 \insc\()l %edi, %ecx
19117 +
19118 +.ifb \unchecked
19119 +#ifdef CONFIG_PAX_REFCOUNT
19120 + into
19121 +2:
19122 + _ASM_EXTABLE(2b, 3f)
19123 +#endif
19124 +.endif
19125 +
19126 LOCK_PREFIX
19127 cmpxchg8b (%ebp)
19128 jne 1b
19129 -
19130 -10:
19131 movl %ebx, %eax
19132 movl %ecx, %edx
19133 +
19134 +.ifb \unchecked
19135 +#ifdef CONFIG_PAX_REFCOUNT
19136 +3:
19137 +#endif
19138 +.endif
19139 +
19140 RESTORE edi
19141 RESTORE esi
19142 RESTORE ebx
19143 RESTORE ebp
19144 + pax_force_retaddr
19145 ret
19146 CFI_ENDPROC
19147 -ENDPROC(atomic64_\func\()_return_cx8)
19148 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
19149 .endm
19150
19151 addsub_return add add adc
19152 addsub_return sub sub sbb
19153 +addsub_return add add adc _unchecked
19154 +addsub_return sub sub sbb _unchecked
19155
19156 -.macro incdec_return func ins insc
19157 -ENTRY(atomic64_\func\()_return_cx8)
19158 +.macro incdec_return func ins insc unchecked
19159 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
19160 CFI_STARTPROC
19161 SAVE ebx
19162
19163 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
19164 movl %edx, %ecx
19165 \ins\()l $1, %ebx
19166 \insc\()l $0, %ecx
19167 +
19168 +.ifb \unchecked
19169 +#ifdef CONFIG_PAX_REFCOUNT
19170 + into
19171 +2:
19172 + _ASM_EXTABLE(2b, 3f)
19173 +#endif
19174 +.endif
19175 +
19176 LOCK_PREFIX
19177 cmpxchg8b (%esi)
19178 jne 1b
19179
19180 -10:
19181 movl %ebx, %eax
19182 movl %ecx, %edx
19183 +
19184 +.ifb \unchecked
19185 +#ifdef CONFIG_PAX_REFCOUNT
19186 +3:
19187 +#endif
19188 +.endif
19189 +
19190 RESTORE ebx
19191 + pax_force_retaddr
19192 ret
19193 CFI_ENDPROC
19194 -ENDPROC(atomic64_\func\()_return_cx8)
19195 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
19196 .endm
19197
19198 incdec_return inc add adc
19199 incdec_return dec sub sbb
19200 +incdec_return inc add adc _unchecked
19201 +incdec_return dec sub sbb _unchecked
19202
19203 ENTRY(atomic64_dec_if_positive_cx8)
19204 CFI_STARTPROC
19205 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
19206 movl %edx, %ecx
19207 subl $1, %ebx
19208 sbb $0, %ecx
19209 +
19210 +#ifdef CONFIG_PAX_REFCOUNT
19211 + into
19212 +1234:
19213 + _ASM_EXTABLE(1234b, 2f)
19214 +#endif
19215 +
19216 js 2f
19217 LOCK_PREFIX
19218 cmpxchg8b (%esi)
19219 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
19220 movl %ebx, %eax
19221 movl %ecx, %edx
19222 RESTORE ebx
19223 + pax_force_retaddr
19224 ret
19225 CFI_ENDPROC
19226 ENDPROC(atomic64_dec_if_positive_cx8)
19227 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
19228 movl %edx, %ecx
19229 addl %esi, %ebx
19230 adcl %edi, %ecx
19231 +
19232 +#ifdef CONFIG_PAX_REFCOUNT
19233 + into
19234 +1234:
19235 + _ASM_EXTABLE(1234b, 3f)
19236 +#endif
19237 +
19238 LOCK_PREFIX
19239 cmpxchg8b (%ebp)
19240 jne 1b
19241 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
19242 CFI_ADJUST_CFA_OFFSET -8
19243 RESTORE ebx
19244 RESTORE ebp
19245 + pax_force_retaddr
19246 ret
19247 4:
19248 cmpl %edx, 4(%esp)
19249 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
19250 movl %edx, %ecx
19251 addl $1, %ebx
19252 adcl $0, %ecx
19253 +
19254 +#ifdef CONFIG_PAX_REFCOUNT
19255 + into
19256 +1234:
19257 + _ASM_EXTABLE(1234b, 3f)
19258 +#endif
19259 +
19260 LOCK_PREFIX
19261 cmpxchg8b (%esi)
19262 jne 1b
19263 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
19264 movl $1, %eax
19265 3:
19266 RESTORE ebx
19267 + pax_force_retaddr
19268 ret
19269 4:
19270 testl %edx, %edx
19271 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
19272 index 78d16a5..fbcf666 100644
19273 --- a/arch/x86/lib/checksum_32.S
19274 +++ b/arch/x86/lib/checksum_32.S
19275 @@ -28,7 +28,8 @@
19276 #include <linux/linkage.h>
19277 #include <asm/dwarf2.h>
19278 #include <asm/errno.h>
19279 -
19280 +#include <asm/segment.h>
19281 +
19282 /*
19283 * computes a partial checksum, e.g. for TCP/UDP fragments
19284 */
19285 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19286
19287 #define ARGBASE 16
19288 #define FP 12
19289 -
19290 -ENTRY(csum_partial_copy_generic)
19291 +
19292 +ENTRY(csum_partial_copy_generic_to_user)
19293 CFI_STARTPROC
19294 +
19295 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19296 + pushl_cfi %gs
19297 + popl_cfi %es
19298 + jmp csum_partial_copy_generic
19299 +#endif
19300 +
19301 +ENTRY(csum_partial_copy_generic_from_user)
19302 +
19303 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19304 + pushl_cfi %gs
19305 + popl_cfi %ds
19306 +#endif
19307 +
19308 +ENTRY(csum_partial_copy_generic)
19309 subl $4,%esp
19310 CFI_ADJUST_CFA_OFFSET 4
19311 pushl_cfi %edi
19312 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19313 jmp 4f
19314 SRC(1: movw (%esi), %bx )
19315 addl $2, %esi
19316 -DST( movw %bx, (%edi) )
19317 +DST( movw %bx, %es:(%edi) )
19318 addl $2, %edi
19319 addw %bx, %ax
19320 adcl $0, %eax
19321 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19322 SRC(1: movl (%esi), %ebx )
19323 SRC( movl 4(%esi), %edx )
19324 adcl %ebx, %eax
19325 -DST( movl %ebx, (%edi) )
19326 +DST( movl %ebx, %es:(%edi) )
19327 adcl %edx, %eax
19328 -DST( movl %edx, 4(%edi) )
19329 +DST( movl %edx, %es:4(%edi) )
19330
19331 SRC( movl 8(%esi), %ebx )
19332 SRC( movl 12(%esi), %edx )
19333 adcl %ebx, %eax
19334 -DST( movl %ebx, 8(%edi) )
19335 +DST( movl %ebx, %es:8(%edi) )
19336 adcl %edx, %eax
19337 -DST( movl %edx, 12(%edi) )
19338 +DST( movl %edx, %es:12(%edi) )
19339
19340 SRC( movl 16(%esi), %ebx )
19341 SRC( movl 20(%esi), %edx )
19342 adcl %ebx, %eax
19343 -DST( movl %ebx, 16(%edi) )
19344 +DST( movl %ebx, %es:16(%edi) )
19345 adcl %edx, %eax
19346 -DST( movl %edx, 20(%edi) )
19347 +DST( movl %edx, %es:20(%edi) )
19348
19349 SRC( movl 24(%esi), %ebx )
19350 SRC( movl 28(%esi), %edx )
19351 adcl %ebx, %eax
19352 -DST( movl %ebx, 24(%edi) )
19353 +DST( movl %ebx, %es:24(%edi) )
19354 adcl %edx, %eax
19355 -DST( movl %edx, 28(%edi) )
19356 +DST( movl %edx, %es:28(%edi) )
19357
19358 lea 32(%esi), %esi
19359 lea 32(%edi), %edi
19360 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19361 shrl $2, %edx # This clears CF
19362 SRC(3: movl (%esi), %ebx )
19363 adcl %ebx, %eax
19364 -DST( movl %ebx, (%edi) )
19365 +DST( movl %ebx, %es:(%edi) )
19366 lea 4(%esi), %esi
19367 lea 4(%edi), %edi
19368 dec %edx
19369 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19370 jb 5f
19371 SRC( movw (%esi), %cx )
19372 leal 2(%esi), %esi
19373 -DST( movw %cx, (%edi) )
19374 +DST( movw %cx, %es:(%edi) )
19375 leal 2(%edi), %edi
19376 je 6f
19377 shll $16,%ecx
19378 SRC(5: movb (%esi), %cl )
19379 -DST( movb %cl, (%edi) )
19380 +DST( movb %cl, %es:(%edi) )
19381 6: addl %ecx, %eax
19382 adcl $0, %eax
19383 7:
19384 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19385
19386 6001:
19387 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19388 - movl $-EFAULT, (%ebx)
19389 + movl $-EFAULT, %ss:(%ebx)
19390
19391 # zero the complete destination - computing the rest
19392 # is too much work
19393 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19394
19395 6002:
19396 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19397 - movl $-EFAULT,(%ebx)
19398 + movl $-EFAULT,%ss:(%ebx)
19399 jmp 5000b
19400
19401 .previous
19402
19403 + pushl_cfi %ss
19404 + popl_cfi %ds
19405 + pushl_cfi %ss
19406 + popl_cfi %es
19407 popl_cfi %ebx
19408 CFI_RESTORE ebx
19409 popl_cfi %esi
19410 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19411 popl_cfi %ecx # equivalent to addl $4,%esp
19412 ret
19413 CFI_ENDPROC
19414 -ENDPROC(csum_partial_copy_generic)
19415 +ENDPROC(csum_partial_copy_generic_to_user)
19416
19417 #else
19418
19419 /* Version for PentiumII/PPro */
19420
19421 #define ROUND1(x) \
19422 + nop; nop; nop; \
19423 SRC(movl x(%esi), %ebx ) ; \
19424 addl %ebx, %eax ; \
19425 - DST(movl %ebx, x(%edi) ) ;
19426 + DST(movl %ebx, %es:x(%edi)) ;
19427
19428 #define ROUND(x) \
19429 + nop; nop; nop; \
19430 SRC(movl x(%esi), %ebx ) ; \
19431 adcl %ebx, %eax ; \
19432 - DST(movl %ebx, x(%edi) ) ;
19433 + DST(movl %ebx, %es:x(%edi)) ;
19434
19435 #define ARGBASE 12
19436 -
19437 -ENTRY(csum_partial_copy_generic)
19438 +
19439 +ENTRY(csum_partial_copy_generic_to_user)
19440 CFI_STARTPROC
19441 +
19442 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19443 + pushl_cfi %gs
19444 + popl_cfi %es
19445 + jmp csum_partial_copy_generic
19446 +#endif
19447 +
19448 +ENTRY(csum_partial_copy_generic_from_user)
19449 +
19450 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19451 + pushl_cfi %gs
19452 + popl_cfi %ds
19453 +#endif
19454 +
19455 +ENTRY(csum_partial_copy_generic)
19456 pushl_cfi %ebx
19457 CFI_REL_OFFSET ebx, 0
19458 pushl_cfi %edi
19459 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19460 subl %ebx, %edi
19461 lea -1(%esi),%edx
19462 andl $-32,%edx
19463 - lea 3f(%ebx,%ebx), %ebx
19464 + lea 3f(%ebx,%ebx,2), %ebx
19465 testl %esi, %esi
19466 jmp *%ebx
19467 1: addl $64,%esi
19468 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19469 jb 5f
19470 SRC( movw (%esi), %dx )
19471 leal 2(%esi), %esi
19472 -DST( movw %dx, (%edi) )
19473 +DST( movw %dx, %es:(%edi) )
19474 leal 2(%edi), %edi
19475 je 6f
19476 shll $16,%edx
19477 5:
19478 SRC( movb (%esi), %dl )
19479 -DST( movb %dl, (%edi) )
19480 +DST( movb %dl, %es:(%edi) )
19481 6: addl %edx, %eax
19482 adcl $0, %eax
19483 7:
19484 .section .fixup, "ax"
19485 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19486 - movl $-EFAULT, (%ebx)
19487 + movl $-EFAULT, %ss:(%ebx)
19488 # zero the complete destination (computing the rest is too much work)
19489 movl ARGBASE+8(%esp),%edi # dst
19490 movl ARGBASE+12(%esp),%ecx # len
19491 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19492 rep; stosb
19493 jmp 7b
19494 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19495 - movl $-EFAULT, (%ebx)
19496 + movl $-EFAULT, %ss:(%ebx)
19497 jmp 7b
19498 .previous
19499
19500 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19501 + pushl_cfi %ss
19502 + popl_cfi %ds
19503 + pushl_cfi %ss
19504 + popl_cfi %es
19505 +#endif
19506 +
19507 popl_cfi %esi
19508 CFI_RESTORE esi
19509 popl_cfi %edi
19510 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19511 CFI_RESTORE ebx
19512 ret
19513 CFI_ENDPROC
19514 -ENDPROC(csum_partial_copy_generic)
19515 +ENDPROC(csum_partial_copy_generic_to_user)
19516
19517 #undef ROUND
19518 #undef ROUND1
19519 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19520 index f2145cf..cea889d 100644
19521 --- a/arch/x86/lib/clear_page_64.S
19522 +++ b/arch/x86/lib/clear_page_64.S
19523 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19524 movl $4096/8,%ecx
19525 xorl %eax,%eax
19526 rep stosq
19527 + pax_force_retaddr
19528 ret
19529 CFI_ENDPROC
19530 ENDPROC(clear_page_c)
19531 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19532 movl $4096,%ecx
19533 xorl %eax,%eax
19534 rep stosb
19535 + pax_force_retaddr
19536 ret
19537 CFI_ENDPROC
19538 ENDPROC(clear_page_c_e)
19539 @@ -43,6 +45,7 @@ ENTRY(clear_page)
19540 leaq 64(%rdi),%rdi
19541 jnz .Lloop
19542 nop
19543 + pax_force_retaddr
19544 ret
19545 CFI_ENDPROC
19546 .Lclear_page_end:
19547 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
19548
19549 #include <asm/cpufeature.h>
19550
19551 - .section .altinstr_replacement,"ax"
19552 + .section .altinstr_replacement,"a"
19553 1: .byte 0xeb /* jmp <disp8> */
19554 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19555 2: .byte 0xeb /* jmp <disp8> */
19556 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19557 index 1e572c5..2a162cd 100644
19558 --- a/arch/x86/lib/cmpxchg16b_emu.S
19559 +++ b/arch/x86/lib/cmpxchg16b_emu.S
19560 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19561
19562 popf
19563 mov $1, %al
19564 + pax_force_retaddr
19565 ret
19566
19567 not_same:
19568 popf
19569 xor %al,%al
19570 + pax_force_retaddr
19571 ret
19572
19573 CFI_ENDPROC
19574 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19575 index 01c805b..dccb07f 100644
19576 --- a/arch/x86/lib/copy_page_64.S
19577 +++ b/arch/x86/lib/copy_page_64.S
19578 @@ -9,6 +9,7 @@ copy_page_c:
19579 CFI_STARTPROC
19580 movl $4096/8,%ecx
19581 rep movsq
19582 + pax_force_retaddr
19583 ret
19584 CFI_ENDPROC
19585 ENDPROC(copy_page_c)
19586 @@ -39,7 +40,7 @@ ENTRY(copy_page)
19587 movq 16 (%rsi), %rdx
19588 movq 24 (%rsi), %r8
19589 movq 32 (%rsi), %r9
19590 - movq 40 (%rsi), %r10
19591 + movq 40 (%rsi), %r13
19592 movq 48 (%rsi), %r11
19593 movq 56 (%rsi), %r12
19594
19595 @@ -50,7 +51,7 @@ ENTRY(copy_page)
19596 movq %rdx, 16 (%rdi)
19597 movq %r8, 24 (%rdi)
19598 movq %r9, 32 (%rdi)
19599 - movq %r10, 40 (%rdi)
19600 + movq %r13, 40 (%rdi)
19601 movq %r11, 48 (%rdi)
19602 movq %r12, 56 (%rdi)
19603
19604 @@ -69,7 +70,7 @@ ENTRY(copy_page)
19605 movq 16 (%rsi), %rdx
19606 movq 24 (%rsi), %r8
19607 movq 32 (%rsi), %r9
19608 - movq 40 (%rsi), %r10
19609 + movq 40 (%rsi), %r13
19610 movq 48 (%rsi), %r11
19611 movq 56 (%rsi), %r12
19612
19613 @@ -78,7 +79,7 @@ ENTRY(copy_page)
19614 movq %rdx, 16 (%rdi)
19615 movq %r8, 24 (%rdi)
19616 movq %r9, 32 (%rdi)
19617 - movq %r10, 40 (%rdi)
19618 + movq %r13, 40 (%rdi)
19619 movq %r11, 48 (%rdi)
19620 movq %r12, 56 (%rdi)
19621
19622 @@ -95,6 +96,7 @@ ENTRY(copy_page)
19623 CFI_RESTORE r13
19624 addq $3*8,%rsp
19625 CFI_ADJUST_CFA_OFFSET -3*8
19626 + pax_force_retaddr
19627 ret
19628 .Lcopy_page_end:
19629 CFI_ENDPROC
19630 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
19631
19632 #include <asm/cpufeature.h>
19633
19634 - .section .altinstr_replacement,"ax"
19635 + .section .altinstr_replacement,"a"
19636 1: .byte 0xeb /* jmp <disp8> */
19637 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19638 2:
19639 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19640 index 0248402..821c786 100644
19641 --- a/arch/x86/lib/copy_user_64.S
19642 +++ b/arch/x86/lib/copy_user_64.S
19643 @@ -16,6 +16,7 @@
19644 #include <asm/thread_info.h>
19645 #include <asm/cpufeature.h>
19646 #include <asm/alternative-asm.h>
19647 +#include <asm/pgtable.h>
19648
19649 /*
19650 * By placing feature2 after feature1 in altinstructions section, we logically
19651 @@ -29,7 +30,7 @@
19652 .byte 0xe9 /* 32bit jump */
19653 .long \orig-1f /* by default jump to orig */
19654 1:
19655 - .section .altinstr_replacement,"ax"
19656 + .section .altinstr_replacement,"a"
19657 2: .byte 0xe9 /* near jump with 32bit immediate */
19658 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19659 3: .byte 0xe9 /* near jump with 32bit immediate */
19660 @@ -71,47 +72,20 @@
19661 #endif
19662 .endm
19663
19664 -/* Standard copy_to_user with segment limit checking */
19665 -ENTRY(_copy_to_user)
19666 - CFI_STARTPROC
19667 - GET_THREAD_INFO(%rax)
19668 - movq %rdi,%rcx
19669 - addq %rdx,%rcx
19670 - jc bad_to_user
19671 - cmpq TI_addr_limit(%rax),%rcx
19672 - ja bad_to_user
19673 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19674 - copy_user_generic_unrolled,copy_user_generic_string, \
19675 - copy_user_enhanced_fast_string
19676 - CFI_ENDPROC
19677 -ENDPROC(_copy_to_user)
19678 -
19679 -/* Standard copy_from_user with segment limit checking */
19680 -ENTRY(_copy_from_user)
19681 - CFI_STARTPROC
19682 - GET_THREAD_INFO(%rax)
19683 - movq %rsi,%rcx
19684 - addq %rdx,%rcx
19685 - jc bad_from_user
19686 - cmpq TI_addr_limit(%rax),%rcx
19687 - ja bad_from_user
19688 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19689 - copy_user_generic_unrolled,copy_user_generic_string, \
19690 - copy_user_enhanced_fast_string
19691 - CFI_ENDPROC
19692 -ENDPROC(_copy_from_user)
19693 -
19694 .section .fixup,"ax"
19695 /* must zero dest */
19696 ENTRY(bad_from_user)
19697 bad_from_user:
19698 CFI_STARTPROC
19699 + testl %edx,%edx
19700 + js bad_to_user
19701 movl %edx,%ecx
19702 xorl %eax,%eax
19703 rep
19704 stosb
19705 bad_to_user:
19706 movl %edx,%eax
19707 + pax_force_retaddr
19708 ret
19709 CFI_ENDPROC
19710 ENDPROC(bad_from_user)
19711 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19712 jz 17f
19713 1: movq (%rsi),%r8
19714 2: movq 1*8(%rsi),%r9
19715 -3: movq 2*8(%rsi),%r10
19716 +3: movq 2*8(%rsi),%rax
19717 4: movq 3*8(%rsi),%r11
19718 5: movq %r8,(%rdi)
19719 6: movq %r9,1*8(%rdi)
19720 -7: movq %r10,2*8(%rdi)
19721 +7: movq %rax,2*8(%rdi)
19722 8: movq %r11,3*8(%rdi)
19723 9: movq 4*8(%rsi),%r8
19724 10: movq 5*8(%rsi),%r9
19725 -11: movq 6*8(%rsi),%r10
19726 +11: movq 6*8(%rsi),%rax
19727 12: movq 7*8(%rsi),%r11
19728 13: movq %r8,4*8(%rdi)
19729 14: movq %r9,5*8(%rdi)
19730 -15: movq %r10,6*8(%rdi)
19731 +15: movq %rax,6*8(%rdi)
19732 16: movq %r11,7*8(%rdi)
19733 leaq 64(%rsi),%rsi
19734 leaq 64(%rdi),%rdi
19735 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19736 decl %ecx
19737 jnz 21b
19738 23: xor %eax,%eax
19739 + pax_force_retaddr
19740 ret
19741
19742 .section .fixup,"ax"
19743 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19744 3: rep
19745 movsb
19746 4: xorl %eax,%eax
19747 + pax_force_retaddr
19748 ret
19749
19750 .section .fixup,"ax"
19751 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19752 1: rep
19753 movsb
19754 2: xorl %eax,%eax
19755 + pax_force_retaddr
19756 ret
19757
19758 .section .fixup,"ax"
19759 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19760 index cb0c112..e3a6895 100644
19761 --- a/arch/x86/lib/copy_user_nocache_64.S
19762 +++ b/arch/x86/lib/copy_user_nocache_64.S
19763 @@ -8,12 +8,14 @@
19764
19765 #include <linux/linkage.h>
19766 #include <asm/dwarf2.h>
19767 +#include <asm/alternative-asm.h>
19768
19769 #define FIX_ALIGNMENT 1
19770
19771 #include <asm/current.h>
19772 #include <asm/asm-offsets.h>
19773 #include <asm/thread_info.h>
19774 +#include <asm/pgtable.h>
19775
19776 .macro ALIGN_DESTINATION
19777 #ifdef FIX_ALIGNMENT
19778 @@ -50,6 +52,15 @@
19779 */
19780 ENTRY(__copy_user_nocache)
19781 CFI_STARTPROC
19782 +
19783 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19784 + mov $PAX_USER_SHADOW_BASE,%rcx
19785 + cmp %rcx,%rsi
19786 + jae 1f
19787 + add %rcx,%rsi
19788 +1:
19789 +#endif
19790 +
19791 cmpl $8,%edx
19792 jb 20f /* less then 8 bytes, go to byte copy loop */
19793 ALIGN_DESTINATION
19794 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19795 jz 17f
19796 1: movq (%rsi),%r8
19797 2: movq 1*8(%rsi),%r9
19798 -3: movq 2*8(%rsi),%r10
19799 +3: movq 2*8(%rsi),%rax
19800 4: movq 3*8(%rsi),%r11
19801 5: movnti %r8,(%rdi)
19802 6: movnti %r9,1*8(%rdi)
19803 -7: movnti %r10,2*8(%rdi)
19804 +7: movnti %rax,2*8(%rdi)
19805 8: movnti %r11,3*8(%rdi)
19806 9: movq 4*8(%rsi),%r8
19807 10: movq 5*8(%rsi),%r9
19808 -11: movq 6*8(%rsi),%r10
19809 +11: movq 6*8(%rsi),%rax
19810 12: movq 7*8(%rsi),%r11
19811 13: movnti %r8,4*8(%rdi)
19812 14: movnti %r9,5*8(%rdi)
19813 -15: movnti %r10,6*8(%rdi)
19814 +15: movnti %rax,6*8(%rdi)
19815 16: movnti %r11,7*8(%rdi)
19816 leaq 64(%rsi),%rsi
19817 leaq 64(%rdi),%rdi
19818 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19819 jnz 21b
19820 23: xorl %eax,%eax
19821 sfence
19822 + pax_force_retaddr
19823 ret
19824
19825 .section .fixup,"ax"
19826 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19827 index fb903b7..c92b7f7 100644
19828 --- a/arch/x86/lib/csum-copy_64.S
19829 +++ b/arch/x86/lib/csum-copy_64.S
19830 @@ -8,6 +8,7 @@
19831 #include <linux/linkage.h>
19832 #include <asm/dwarf2.h>
19833 #include <asm/errno.h>
19834 +#include <asm/alternative-asm.h>
19835
19836 /*
19837 * Checksum copy with exception handling.
19838 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19839 CFI_RESTORE rbp
19840 addq $7*8, %rsp
19841 CFI_ADJUST_CFA_OFFSET -7*8
19842 + pax_force_retaddr 0, 1
19843 ret
19844 CFI_RESTORE_STATE
19845
19846 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19847 index 459b58a..9570bc7 100644
19848 --- a/arch/x86/lib/csum-wrappers_64.c
19849 +++ b/arch/x86/lib/csum-wrappers_64.c
19850 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19851 len -= 2;
19852 }
19853 }
19854 - isum = csum_partial_copy_generic((__force const void *)src,
19855 +
19856 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19857 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19858 + src += PAX_USER_SHADOW_BASE;
19859 +#endif
19860 +
19861 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
19862 dst, len, isum, errp, NULL);
19863 if (unlikely(*errp))
19864 goto out_err;
19865 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19866 }
19867
19868 *errp = 0;
19869 - return csum_partial_copy_generic(src, (void __force *)dst,
19870 +
19871 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19872 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19873 + dst += PAX_USER_SHADOW_BASE;
19874 +#endif
19875 +
19876 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19877 len, isum, NULL, errp);
19878 }
19879 EXPORT_SYMBOL(csum_partial_copy_to_user);
19880 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19881 index 51f1504..ddac4c1 100644
19882 --- a/arch/x86/lib/getuser.S
19883 +++ b/arch/x86/lib/getuser.S
19884 @@ -33,15 +33,38 @@
19885 #include <asm/asm-offsets.h>
19886 #include <asm/thread_info.h>
19887 #include <asm/asm.h>
19888 +#include <asm/segment.h>
19889 +#include <asm/pgtable.h>
19890 +#include <asm/alternative-asm.h>
19891 +
19892 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19893 +#define __copyuser_seg gs;
19894 +#else
19895 +#define __copyuser_seg
19896 +#endif
19897
19898 .text
19899 ENTRY(__get_user_1)
19900 CFI_STARTPROC
19901 +
19902 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19903 GET_THREAD_INFO(%_ASM_DX)
19904 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19905 jae bad_get_user
19906 -1: movzb (%_ASM_AX),%edx
19907 +
19908 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19909 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19910 + cmp %_ASM_DX,%_ASM_AX
19911 + jae 1234f
19912 + add %_ASM_DX,%_ASM_AX
19913 +1234:
19914 +#endif
19915 +
19916 +#endif
19917 +
19918 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19919 xor %eax,%eax
19920 + pax_force_retaddr
19921 ret
19922 CFI_ENDPROC
19923 ENDPROC(__get_user_1)
19924 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19925 ENTRY(__get_user_2)
19926 CFI_STARTPROC
19927 add $1,%_ASM_AX
19928 +
19929 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19930 jc bad_get_user
19931 GET_THREAD_INFO(%_ASM_DX)
19932 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19933 jae bad_get_user
19934 -2: movzwl -1(%_ASM_AX),%edx
19935 +
19936 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19937 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19938 + cmp %_ASM_DX,%_ASM_AX
19939 + jae 1234f
19940 + add %_ASM_DX,%_ASM_AX
19941 +1234:
19942 +#endif
19943 +
19944 +#endif
19945 +
19946 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19947 xor %eax,%eax
19948 + pax_force_retaddr
19949 ret
19950 CFI_ENDPROC
19951 ENDPROC(__get_user_2)
19952 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19953 ENTRY(__get_user_4)
19954 CFI_STARTPROC
19955 add $3,%_ASM_AX
19956 +
19957 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19958 jc bad_get_user
19959 GET_THREAD_INFO(%_ASM_DX)
19960 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19961 jae bad_get_user
19962 -3: mov -3(%_ASM_AX),%edx
19963 +
19964 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19965 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19966 + cmp %_ASM_DX,%_ASM_AX
19967 + jae 1234f
19968 + add %_ASM_DX,%_ASM_AX
19969 +1234:
19970 +#endif
19971 +
19972 +#endif
19973 +
19974 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19975 xor %eax,%eax
19976 + pax_force_retaddr
19977 ret
19978 CFI_ENDPROC
19979 ENDPROC(__get_user_4)
19980 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19981 GET_THREAD_INFO(%_ASM_DX)
19982 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19983 jae bad_get_user
19984 +
19985 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19986 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19987 + cmp %_ASM_DX,%_ASM_AX
19988 + jae 1234f
19989 + add %_ASM_DX,%_ASM_AX
19990 +1234:
19991 +#endif
19992 +
19993 4: movq -7(%_ASM_AX),%_ASM_DX
19994 xor %eax,%eax
19995 + pax_force_retaddr
19996 ret
19997 CFI_ENDPROC
19998 ENDPROC(__get_user_8)
19999 @@ -91,6 +152,7 @@ bad_get_user:
20000 CFI_STARTPROC
20001 xor %edx,%edx
20002 mov $(-EFAULT),%_ASM_AX
20003 + pax_force_retaddr
20004 ret
20005 CFI_ENDPROC
20006 END(bad_get_user)
20007 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
20008 index 374562e..a75830b 100644
20009 --- a/arch/x86/lib/insn.c
20010 +++ b/arch/x86/lib/insn.c
20011 @@ -21,6 +21,11 @@
20012 #include <linux/string.h>
20013 #include <asm/inat.h>
20014 #include <asm/insn.h>
20015 +#ifdef __KERNEL__
20016 +#include <asm/pgtable_types.h>
20017 +#else
20018 +#define ktla_ktva(addr) addr
20019 +#endif
20020
20021 /* Verify next sizeof(t) bytes can be on the same instruction */
20022 #define validate_next(t, insn, n) \
20023 @@ -49,8 +54,8 @@
20024 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
20025 {
20026 memset(insn, 0, sizeof(*insn));
20027 - insn->kaddr = kaddr;
20028 - insn->next_byte = kaddr;
20029 + insn->kaddr = ktla_ktva(kaddr);
20030 + insn->next_byte = ktla_ktva(kaddr);
20031 insn->x86_64 = x86_64 ? 1 : 0;
20032 insn->opnd_bytes = 4;
20033 if (x86_64)
20034 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
20035 index 05a95e7..326f2fa 100644
20036 --- a/arch/x86/lib/iomap_copy_64.S
20037 +++ b/arch/x86/lib/iomap_copy_64.S
20038 @@ -17,6 +17,7 @@
20039
20040 #include <linux/linkage.h>
20041 #include <asm/dwarf2.h>
20042 +#include <asm/alternative-asm.h>
20043
20044 /*
20045 * override generic version in lib/iomap_copy.c
20046 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
20047 CFI_STARTPROC
20048 movl %edx,%ecx
20049 rep movsd
20050 + pax_force_retaddr
20051 ret
20052 CFI_ENDPROC
20053 ENDPROC(__iowrite32_copy)
20054 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
20055 index efbf2a0..8893637 100644
20056 --- a/arch/x86/lib/memcpy_64.S
20057 +++ b/arch/x86/lib/memcpy_64.S
20058 @@ -34,6 +34,7 @@
20059 rep movsq
20060 movl %edx, %ecx
20061 rep movsb
20062 + pax_force_retaddr
20063 ret
20064 .Lmemcpy_e:
20065 .previous
20066 @@ -51,6 +52,7 @@
20067
20068 movl %edx, %ecx
20069 rep movsb
20070 + pax_force_retaddr
20071 ret
20072 .Lmemcpy_e_e:
20073 .previous
20074 @@ -81,13 +83,13 @@ ENTRY(memcpy)
20075 */
20076 movq 0*8(%rsi), %r8
20077 movq 1*8(%rsi), %r9
20078 - movq 2*8(%rsi), %r10
20079 + movq 2*8(%rsi), %rcx
20080 movq 3*8(%rsi), %r11
20081 leaq 4*8(%rsi), %rsi
20082
20083 movq %r8, 0*8(%rdi)
20084 movq %r9, 1*8(%rdi)
20085 - movq %r10, 2*8(%rdi)
20086 + movq %rcx, 2*8(%rdi)
20087 movq %r11, 3*8(%rdi)
20088 leaq 4*8(%rdi), %rdi
20089 jae .Lcopy_forward_loop
20090 @@ -110,12 +112,12 @@ ENTRY(memcpy)
20091 subq $0x20, %rdx
20092 movq -1*8(%rsi), %r8
20093 movq -2*8(%rsi), %r9
20094 - movq -3*8(%rsi), %r10
20095 + movq -3*8(%rsi), %rcx
20096 movq -4*8(%rsi), %r11
20097 leaq -4*8(%rsi), %rsi
20098 movq %r8, -1*8(%rdi)
20099 movq %r9, -2*8(%rdi)
20100 - movq %r10, -3*8(%rdi)
20101 + movq %rcx, -3*8(%rdi)
20102 movq %r11, -4*8(%rdi)
20103 leaq -4*8(%rdi), %rdi
20104 jae .Lcopy_backward_loop
20105 @@ -135,12 +137,13 @@ ENTRY(memcpy)
20106 */
20107 movq 0*8(%rsi), %r8
20108 movq 1*8(%rsi), %r9
20109 - movq -2*8(%rsi, %rdx), %r10
20110 + movq -2*8(%rsi, %rdx), %rcx
20111 movq -1*8(%rsi, %rdx), %r11
20112 movq %r8, 0*8(%rdi)
20113 movq %r9, 1*8(%rdi)
20114 - movq %r10, -2*8(%rdi, %rdx)
20115 + movq %rcx, -2*8(%rdi, %rdx)
20116 movq %r11, -1*8(%rdi, %rdx)
20117 + pax_force_retaddr
20118 retq
20119 .p2align 4
20120 .Lless_16bytes:
20121 @@ -153,6 +156,7 @@ ENTRY(memcpy)
20122 movq -1*8(%rsi, %rdx), %r9
20123 movq %r8, 0*8(%rdi)
20124 movq %r9, -1*8(%rdi, %rdx)
20125 + pax_force_retaddr
20126 retq
20127 .p2align 4
20128 .Lless_8bytes:
20129 @@ -166,6 +170,7 @@ ENTRY(memcpy)
20130 movl -4(%rsi, %rdx), %r8d
20131 movl %ecx, (%rdi)
20132 movl %r8d, -4(%rdi, %rdx)
20133 + pax_force_retaddr
20134 retq
20135 .p2align 4
20136 .Lless_3bytes:
20137 @@ -183,6 +188,7 @@ ENTRY(memcpy)
20138 jnz .Lloop_1
20139
20140 .Lend:
20141 + pax_force_retaddr
20142 retq
20143 CFI_ENDPROC
20144 ENDPROC(memcpy)
20145 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
20146 index ee16461..c39c199 100644
20147 --- a/arch/x86/lib/memmove_64.S
20148 +++ b/arch/x86/lib/memmove_64.S
20149 @@ -61,13 +61,13 @@ ENTRY(memmove)
20150 5:
20151 sub $0x20, %rdx
20152 movq 0*8(%rsi), %r11
20153 - movq 1*8(%rsi), %r10
20154 + movq 1*8(%rsi), %rcx
20155 movq 2*8(%rsi), %r9
20156 movq 3*8(%rsi), %r8
20157 leaq 4*8(%rsi), %rsi
20158
20159 movq %r11, 0*8(%rdi)
20160 - movq %r10, 1*8(%rdi)
20161 + movq %rcx, 1*8(%rdi)
20162 movq %r9, 2*8(%rdi)
20163 movq %r8, 3*8(%rdi)
20164 leaq 4*8(%rdi), %rdi
20165 @@ -81,10 +81,10 @@ ENTRY(memmove)
20166 4:
20167 movq %rdx, %rcx
20168 movq -8(%rsi, %rdx), %r11
20169 - lea -8(%rdi, %rdx), %r10
20170 + lea -8(%rdi, %rdx), %r9
20171 shrq $3, %rcx
20172 rep movsq
20173 - movq %r11, (%r10)
20174 + movq %r11, (%r9)
20175 jmp 13f
20176 .Lmemmove_end_forward:
20177
20178 @@ -95,14 +95,14 @@ ENTRY(memmove)
20179 7:
20180 movq %rdx, %rcx
20181 movq (%rsi), %r11
20182 - movq %rdi, %r10
20183 + movq %rdi, %r9
20184 leaq -8(%rsi, %rdx), %rsi
20185 leaq -8(%rdi, %rdx), %rdi
20186 shrq $3, %rcx
20187 std
20188 rep movsq
20189 cld
20190 - movq %r11, (%r10)
20191 + movq %r11, (%r9)
20192 jmp 13f
20193
20194 /*
20195 @@ -127,13 +127,13 @@ ENTRY(memmove)
20196 8:
20197 subq $0x20, %rdx
20198 movq -1*8(%rsi), %r11
20199 - movq -2*8(%rsi), %r10
20200 + movq -2*8(%rsi), %rcx
20201 movq -3*8(%rsi), %r9
20202 movq -4*8(%rsi), %r8
20203 leaq -4*8(%rsi), %rsi
20204
20205 movq %r11, -1*8(%rdi)
20206 - movq %r10, -2*8(%rdi)
20207 + movq %rcx, -2*8(%rdi)
20208 movq %r9, -3*8(%rdi)
20209 movq %r8, -4*8(%rdi)
20210 leaq -4*8(%rdi), %rdi
20211 @@ -151,11 +151,11 @@ ENTRY(memmove)
20212 * Move data from 16 bytes to 31 bytes.
20213 */
20214 movq 0*8(%rsi), %r11
20215 - movq 1*8(%rsi), %r10
20216 + movq 1*8(%rsi), %rcx
20217 movq -2*8(%rsi, %rdx), %r9
20218 movq -1*8(%rsi, %rdx), %r8
20219 movq %r11, 0*8(%rdi)
20220 - movq %r10, 1*8(%rdi)
20221 + movq %rcx, 1*8(%rdi)
20222 movq %r9, -2*8(%rdi, %rdx)
20223 movq %r8, -1*8(%rdi, %rdx)
20224 jmp 13f
20225 @@ -167,9 +167,9 @@ ENTRY(memmove)
20226 * Move data from 8 bytes to 15 bytes.
20227 */
20228 movq 0*8(%rsi), %r11
20229 - movq -1*8(%rsi, %rdx), %r10
20230 + movq -1*8(%rsi, %rdx), %r9
20231 movq %r11, 0*8(%rdi)
20232 - movq %r10, -1*8(%rdi, %rdx)
20233 + movq %r9, -1*8(%rdi, %rdx)
20234 jmp 13f
20235 10:
20236 cmpq $4, %rdx
20237 @@ -178,9 +178,9 @@ ENTRY(memmove)
20238 * Move data from 4 bytes to 7 bytes.
20239 */
20240 movl (%rsi), %r11d
20241 - movl -4(%rsi, %rdx), %r10d
20242 + movl -4(%rsi, %rdx), %r9d
20243 movl %r11d, (%rdi)
20244 - movl %r10d, -4(%rdi, %rdx)
20245 + movl %r9d, -4(%rdi, %rdx)
20246 jmp 13f
20247 11:
20248 cmp $2, %rdx
20249 @@ -189,9 +189,9 @@ ENTRY(memmove)
20250 * Move data from 2 bytes to 3 bytes.
20251 */
20252 movw (%rsi), %r11w
20253 - movw -2(%rsi, %rdx), %r10w
20254 + movw -2(%rsi, %rdx), %r9w
20255 movw %r11w, (%rdi)
20256 - movw %r10w, -2(%rdi, %rdx)
20257 + movw %r9w, -2(%rdi, %rdx)
20258 jmp 13f
20259 12:
20260 cmp $1, %rdx
20261 @@ -202,6 +202,7 @@ ENTRY(memmove)
20262 movb (%rsi), %r11b
20263 movb %r11b, (%rdi)
20264 13:
20265 + pax_force_retaddr
20266 retq
20267 CFI_ENDPROC
20268
20269 @@ -210,6 +211,7 @@ ENTRY(memmove)
20270 /* Forward moving data. */
20271 movq %rdx, %rcx
20272 rep movsb
20273 + pax_force_retaddr
20274 retq
20275 .Lmemmove_end_forward_efs:
20276 .previous
20277 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20278 index 79bd454..dff325a 100644
20279 --- a/arch/x86/lib/memset_64.S
20280 +++ b/arch/x86/lib/memset_64.S
20281 @@ -31,6 +31,7 @@
20282 movl %r8d,%ecx
20283 rep stosb
20284 movq %r9,%rax
20285 + pax_force_retaddr
20286 ret
20287 .Lmemset_e:
20288 .previous
20289 @@ -53,6 +54,7 @@
20290 movl %edx,%ecx
20291 rep stosb
20292 movq %r9,%rax
20293 + pax_force_retaddr
20294 ret
20295 .Lmemset_e_e:
20296 .previous
20297 @@ -60,13 +62,13 @@
20298 ENTRY(memset)
20299 ENTRY(__memset)
20300 CFI_STARTPROC
20301 - movq %rdi,%r10
20302 movq %rdx,%r11
20303
20304 /* expand byte value */
20305 movzbl %sil,%ecx
20306 movabs $0x0101010101010101,%rax
20307 mul %rcx /* with rax, clobbers rdx */
20308 + movq %rdi,%rdx
20309
20310 /* align dst */
20311 movl %edi,%r9d
20312 @@ -120,7 +122,8 @@ ENTRY(__memset)
20313 jnz .Lloop_1
20314
20315 .Lende:
20316 - movq %r10,%rax
20317 + movq %rdx,%rax
20318 + pax_force_retaddr
20319 ret
20320
20321 CFI_RESTORE_STATE
20322 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20323 index c9f2d9b..e7fd2c0 100644
20324 --- a/arch/x86/lib/mmx_32.c
20325 +++ b/arch/x86/lib/mmx_32.c
20326 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20327 {
20328 void *p;
20329 int i;
20330 + unsigned long cr0;
20331
20332 if (unlikely(in_interrupt()))
20333 return __memcpy(to, from, len);
20334 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20335 kernel_fpu_begin();
20336
20337 __asm__ __volatile__ (
20338 - "1: prefetch (%0)\n" /* This set is 28 bytes */
20339 - " prefetch 64(%0)\n"
20340 - " prefetch 128(%0)\n"
20341 - " prefetch 192(%0)\n"
20342 - " prefetch 256(%0)\n"
20343 + "1: prefetch (%1)\n" /* This set is 28 bytes */
20344 + " prefetch 64(%1)\n"
20345 + " prefetch 128(%1)\n"
20346 + " prefetch 192(%1)\n"
20347 + " prefetch 256(%1)\n"
20348 "2: \n"
20349 ".section .fixup, \"ax\"\n"
20350 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20351 + "3: \n"
20352 +
20353 +#ifdef CONFIG_PAX_KERNEXEC
20354 + " movl %%cr0, %0\n"
20355 + " movl %0, %%eax\n"
20356 + " andl $0xFFFEFFFF, %%eax\n"
20357 + " movl %%eax, %%cr0\n"
20358 +#endif
20359 +
20360 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20361 +
20362 +#ifdef CONFIG_PAX_KERNEXEC
20363 + " movl %0, %%cr0\n"
20364 +#endif
20365 +
20366 " jmp 2b\n"
20367 ".previous\n"
20368 _ASM_EXTABLE(1b, 3b)
20369 - : : "r" (from));
20370 + : "=&r" (cr0) : "r" (from) : "ax");
20371
20372 for ( ; i > 5; i--) {
20373 __asm__ __volatile__ (
20374 - "1: prefetch 320(%0)\n"
20375 - "2: movq (%0), %%mm0\n"
20376 - " movq 8(%0), %%mm1\n"
20377 - " movq 16(%0), %%mm2\n"
20378 - " movq 24(%0), %%mm3\n"
20379 - " movq %%mm0, (%1)\n"
20380 - " movq %%mm1, 8(%1)\n"
20381 - " movq %%mm2, 16(%1)\n"
20382 - " movq %%mm3, 24(%1)\n"
20383 - " movq 32(%0), %%mm0\n"
20384 - " movq 40(%0), %%mm1\n"
20385 - " movq 48(%0), %%mm2\n"
20386 - " movq 56(%0), %%mm3\n"
20387 - " movq %%mm0, 32(%1)\n"
20388 - " movq %%mm1, 40(%1)\n"
20389 - " movq %%mm2, 48(%1)\n"
20390 - " movq %%mm3, 56(%1)\n"
20391 + "1: prefetch 320(%1)\n"
20392 + "2: movq (%1), %%mm0\n"
20393 + " movq 8(%1), %%mm1\n"
20394 + " movq 16(%1), %%mm2\n"
20395 + " movq 24(%1), %%mm3\n"
20396 + " movq %%mm0, (%2)\n"
20397 + " movq %%mm1, 8(%2)\n"
20398 + " movq %%mm2, 16(%2)\n"
20399 + " movq %%mm3, 24(%2)\n"
20400 + " movq 32(%1), %%mm0\n"
20401 + " movq 40(%1), %%mm1\n"
20402 + " movq 48(%1), %%mm2\n"
20403 + " movq 56(%1), %%mm3\n"
20404 + " movq %%mm0, 32(%2)\n"
20405 + " movq %%mm1, 40(%2)\n"
20406 + " movq %%mm2, 48(%2)\n"
20407 + " movq %%mm3, 56(%2)\n"
20408 ".section .fixup, \"ax\"\n"
20409 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20410 + "3:\n"
20411 +
20412 +#ifdef CONFIG_PAX_KERNEXEC
20413 + " movl %%cr0, %0\n"
20414 + " movl %0, %%eax\n"
20415 + " andl $0xFFFEFFFF, %%eax\n"
20416 + " movl %%eax, %%cr0\n"
20417 +#endif
20418 +
20419 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20420 +
20421 +#ifdef CONFIG_PAX_KERNEXEC
20422 + " movl %0, %%cr0\n"
20423 +#endif
20424 +
20425 " jmp 2b\n"
20426 ".previous\n"
20427 _ASM_EXTABLE(1b, 3b)
20428 - : : "r" (from), "r" (to) : "memory");
20429 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20430
20431 from += 64;
20432 to += 64;
20433 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20434 static void fast_copy_page(void *to, void *from)
20435 {
20436 int i;
20437 + unsigned long cr0;
20438
20439 kernel_fpu_begin();
20440
20441 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20442 * but that is for later. -AV
20443 */
20444 __asm__ __volatile__(
20445 - "1: prefetch (%0)\n"
20446 - " prefetch 64(%0)\n"
20447 - " prefetch 128(%0)\n"
20448 - " prefetch 192(%0)\n"
20449 - " prefetch 256(%0)\n"
20450 + "1: prefetch (%1)\n"
20451 + " prefetch 64(%1)\n"
20452 + " prefetch 128(%1)\n"
20453 + " prefetch 192(%1)\n"
20454 + " prefetch 256(%1)\n"
20455 "2: \n"
20456 ".section .fixup, \"ax\"\n"
20457 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20458 + "3: \n"
20459 +
20460 +#ifdef CONFIG_PAX_KERNEXEC
20461 + " movl %%cr0, %0\n"
20462 + " movl %0, %%eax\n"
20463 + " andl $0xFFFEFFFF, %%eax\n"
20464 + " movl %%eax, %%cr0\n"
20465 +#endif
20466 +
20467 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20468 +
20469 +#ifdef CONFIG_PAX_KERNEXEC
20470 + " movl %0, %%cr0\n"
20471 +#endif
20472 +
20473 " jmp 2b\n"
20474 ".previous\n"
20475 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20476 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20477
20478 for (i = 0; i < (4096-320)/64; i++) {
20479 __asm__ __volatile__ (
20480 - "1: prefetch 320(%0)\n"
20481 - "2: movq (%0), %%mm0\n"
20482 - " movntq %%mm0, (%1)\n"
20483 - " movq 8(%0), %%mm1\n"
20484 - " movntq %%mm1, 8(%1)\n"
20485 - " movq 16(%0), %%mm2\n"
20486 - " movntq %%mm2, 16(%1)\n"
20487 - " movq 24(%0), %%mm3\n"
20488 - " movntq %%mm3, 24(%1)\n"
20489 - " movq 32(%0), %%mm4\n"
20490 - " movntq %%mm4, 32(%1)\n"
20491 - " movq 40(%0), %%mm5\n"
20492 - " movntq %%mm5, 40(%1)\n"
20493 - " movq 48(%0), %%mm6\n"
20494 - " movntq %%mm6, 48(%1)\n"
20495 - " movq 56(%0), %%mm7\n"
20496 - " movntq %%mm7, 56(%1)\n"
20497 + "1: prefetch 320(%1)\n"
20498 + "2: movq (%1), %%mm0\n"
20499 + " movntq %%mm0, (%2)\n"
20500 + " movq 8(%1), %%mm1\n"
20501 + " movntq %%mm1, 8(%2)\n"
20502 + " movq 16(%1), %%mm2\n"
20503 + " movntq %%mm2, 16(%2)\n"
20504 + " movq 24(%1), %%mm3\n"
20505 + " movntq %%mm3, 24(%2)\n"
20506 + " movq 32(%1), %%mm4\n"
20507 + " movntq %%mm4, 32(%2)\n"
20508 + " movq 40(%1), %%mm5\n"
20509 + " movntq %%mm5, 40(%2)\n"
20510 + " movq 48(%1), %%mm6\n"
20511 + " movntq %%mm6, 48(%2)\n"
20512 + " movq 56(%1), %%mm7\n"
20513 + " movntq %%mm7, 56(%2)\n"
20514 ".section .fixup, \"ax\"\n"
20515 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20516 + "3:\n"
20517 +
20518 +#ifdef CONFIG_PAX_KERNEXEC
20519 + " movl %%cr0, %0\n"
20520 + " movl %0, %%eax\n"
20521 + " andl $0xFFFEFFFF, %%eax\n"
20522 + " movl %%eax, %%cr0\n"
20523 +#endif
20524 +
20525 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20526 +
20527 +#ifdef CONFIG_PAX_KERNEXEC
20528 + " movl %0, %%cr0\n"
20529 +#endif
20530 +
20531 " jmp 2b\n"
20532 ".previous\n"
20533 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20534 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20535
20536 from += 64;
20537 to += 64;
20538 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20539 static void fast_copy_page(void *to, void *from)
20540 {
20541 int i;
20542 + unsigned long cr0;
20543
20544 kernel_fpu_begin();
20545
20546 __asm__ __volatile__ (
20547 - "1: prefetch (%0)\n"
20548 - " prefetch 64(%0)\n"
20549 - " prefetch 128(%0)\n"
20550 - " prefetch 192(%0)\n"
20551 - " prefetch 256(%0)\n"
20552 + "1: prefetch (%1)\n"
20553 + " prefetch 64(%1)\n"
20554 + " prefetch 128(%1)\n"
20555 + " prefetch 192(%1)\n"
20556 + " prefetch 256(%1)\n"
20557 "2: \n"
20558 ".section .fixup, \"ax\"\n"
20559 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20560 + "3: \n"
20561 +
20562 +#ifdef CONFIG_PAX_KERNEXEC
20563 + " movl %%cr0, %0\n"
20564 + " movl %0, %%eax\n"
20565 + " andl $0xFFFEFFFF, %%eax\n"
20566 + " movl %%eax, %%cr0\n"
20567 +#endif
20568 +
20569 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20570 +
20571 +#ifdef CONFIG_PAX_KERNEXEC
20572 + " movl %0, %%cr0\n"
20573 +#endif
20574 +
20575 " jmp 2b\n"
20576 ".previous\n"
20577 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20578 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20579
20580 for (i = 0; i < 4096/64; i++) {
20581 __asm__ __volatile__ (
20582 - "1: prefetch 320(%0)\n"
20583 - "2: movq (%0), %%mm0\n"
20584 - " movq 8(%0), %%mm1\n"
20585 - " movq 16(%0), %%mm2\n"
20586 - " movq 24(%0), %%mm3\n"
20587 - " movq %%mm0, (%1)\n"
20588 - " movq %%mm1, 8(%1)\n"
20589 - " movq %%mm2, 16(%1)\n"
20590 - " movq %%mm3, 24(%1)\n"
20591 - " movq 32(%0), %%mm0\n"
20592 - " movq 40(%0), %%mm1\n"
20593 - " movq 48(%0), %%mm2\n"
20594 - " movq 56(%0), %%mm3\n"
20595 - " movq %%mm0, 32(%1)\n"
20596 - " movq %%mm1, 40(%1)\n"
20597 - " movq %%mm2, 48(%1)\n"
20598 - " movq %%mm3, 56(%1)\n"
20599 + "1: prefetch 320(%1)\n"
20600 + "2: movq (%1), %%mm0\n"
20601 + " movq 8(%1), %%mm1\n"
20602 + " movq 16(%1), %%mm2\n"
20603 + " movq 24(%1), %%mm3\n"
20604 + " movq %%mm0, (%2)\n"
20605 + " movq %%mm1, 8(%2)\n"
20606 + " movq %%mm2, 16(%2)\n"
20607 + " movq %%mm3, 24(%2)\n"
20608 + " movq 32(%1), %%mm0\n"
20609 + " movq 40(%1), %%mm1\n"
20610 + " movq 48(%1), %%mm2\n"
20611 + " movq 56(%1), %%mm3\n"
20612 + " movq %%mm0, 32(%2)\n"
20613 + " movq %%mm1, 40(%2)\n"
20614 + " movq %%mm2, 48(%2)\n"
20615 + " movq %%mm3, 56(%2)\n"
20616 ".section .fixup, \"ax\"\n"
20617 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20618 + "3:\n"
20619 +
20620 +#ifdef CONFIG_PAX_KERNEXEC
20621 + " movl %%cr0, %0\n"
20622 + " movl %0, %%eax\n"
20623 + " andl $0xFFFEFFFF, %%eax\n"
20624 + " movl %%eax, %%cr0\n"
20625 +#endif
20626 +
20627 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20628 +
20629 +#ifdef CONFIG_PAX_KERNEXEC
20630 + " movl %0, %%cr0\n"
20631 +#endif
20632 +
20633 " jmp 2b\n"
20634 ".previous\n"
20635 _ASM_EXTABLE(1b, 3b)
20636 - : : "r" (from), "r" (to) : "memory");
20637 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20638
20639 from += 64;
20640 to += 64;
20641 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20642 index 69fa106..adda88b 100644
20643 --- a/arch/x86/lib/msr-reg.S
20644 +++ b/arch/x86/lib/msr-reg.S
20645 @@ -3,6 +3,7 @@
20646 #include <asm/dwarf2.h>
20647 #include <asm/asm.h>
20648 #include <asm/msr.h>
20649 +#include <asm/alternative-asm.h>
20650
20651 #ifdef CONFIG_X86_64
20652 /*
20653 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20654 CFI_STARTPROC
20655 pushq_cfi %rbx
20656 pushq_cfi %rbp
20657 - movq %rdi, %r10 /* Save pointer */
20658 + movq %rdi, %r9 /* Save pointer */
20659 xorl %r11d, %r11d /* Return value */
20660 movl (%rdi), %eax
20661 movl 4(%rdi), %ecx
20662 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20663 movl 28(%rdi), %edi
20664 CFI_REMEMBER_STATE
20665 1: \op
20666 -2: movl %eax, (%r10)
20667 +2: movl %eax, (%r9)
20668 movl %r11d, %eax /* Return value */
20669 - movl %ecx, 4(%r10)
20670 - movl %edx, 8(%r10)
20671 - movl %ebx, 12(%r10)
20672 - movl %ebp, 20(%r10)
20673 - movl %esi, 24(%r10)
20674 - movl %edi, 28(%r10)
20675 + movl %ecx, 4(%r9)
20676 + movl %edx, 8(%r9)
20677 + movl %ebx, 12(%r9)
20678 + movl %ebp, 20(%r9)
20679 + movl %esi, 24(%r9)
20680 + movl %edi, 28(%r9)
20681 popq_cfi %rbp
20682 popq_cfi %rbx
20683 + pax_force_retaddr
20684 ret
20685 3:
20686 CFI_RESTORE_STATE
20687 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20688 index 36b0d15..d381858 100644
20689 --- a/arch/x86/lib/putuser.S
20690 +++ b/arch/x86/lib/putuser.S
20691 @@ -15,7 +15,9 @@
20692 #include <asm/thread_info.h>
20693 #include <asm/errno.h>
20694 #include <asm/asm.h>
20695 -
20696 +#include <asm/segment.h>
20697 +#include <asm/pgtable.h>
20698 +#include <asm/alternative-asm.h>
20699
20700 /*
20701 * __put_user_X
20702 @@ -29,52 +31,119 @@
20703 * as they get called from within inline assembly.
20704 */
20705
20706 -#define ENTER CFI_STARTPROC ; \
20707 - GET_THREAD_INFO(%_ASM_BX)
20708 -#define EXIT ret ; \
20709 +#define ENTER CFI_STARTPROC
20710 +#define EXIT pax_force_retaddr; ret ; \
20711 CFI_ENDPROC
20712
20713 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20714 +#define _DEST %_ASM_CX,%_ASM_BX
20715 +#else
20716 +#define _DEST %_ASM_CX
20717 +#endif
20718 +
20719 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20720 +#define __copyuser_seg gs;
20721 +#else
20722 +#define __copyuser_seg
20723 +#endif
20724 +
20725 .text
20726 ENTRY(__put_user_1)
20727 ENTER
20728 +
20729 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20730 + GET_THREAD_INFO(%_ASM_BX)
20731 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20732 jae bad_put_user
20733 -1: movb %al,(%_ASM_CX)
20734 +
20735 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20736 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20737 + cmp %_ASM_BX,%_ASM_CX
20738 + jb 1234f
20739 + xor %ebx,%ebx
20740 +1234:
20741 +#endif
20742 +
20743 +#endif
20744 +
20745 +1: __copyuser_seg movb %al,(_DEST)
20746 xor %eax,%eax
20747 EXIT
20748 ENDPROC(__put_user_1)
20749
20750 ENTRY(__put_user_2)
20751 ENTER
20752 +
20753 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20754 + GET_THREAD_INFO(%_ASM_BX)
20755 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20756 sub $1,%_ASM_BX
20757 cmp %_ASM_BX,%_ASM_CX
20758 jae bad_put_user
20759 -2: movw %ax,(%_ASM_CX)
20760 +
20761 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20762 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20763 + cmp %_ASM_BX,%_ASM_CX
20764 + jb 1234f
20765 + xor %ebx,%ebx
20766 +1234:
20767 +#endif
20768 +
20769 +#endif
20770 +
20771 +2: __copyuser_seg movw %ax,(_DEST)
20772 xor %eax,%eax
20773 EXIT
20774 ENDPROC(__put_user_2)
20775
20776 ENTRY(__put_user_4)
20777 ENTER
20778 +
20779 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20780 + GET_THREAD_INFO(%_ASM_BX)
20781 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20782 sub $3,%_ASM_BX
20783 cmp %_ASM_BX,%_ASM_CX
20784 jae bad_put_user
20785 -3: movl %eax,(%_ASM_CX)
20786 +
20787 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20788 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20789 + cmp %_ASM_BX,%_ASM_CX
20790 + jb 1234f
20791 + xor %ebx,%ebx
20792 +1234:
20793 +#endif
20794 +
20795 +#endif
20796 +
20797 +3: __copyuser_seg movl %eax,(_DEST)
20798 xor %eax,%eax
20799 EXIT
20800 ENDPROC(__put_user_4)
20801
20802 ENTRY(__put_user_8)
20803 ENTER
20804 +
20805 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20806 + GET_THREAD_INFO(%_ASM_BX)
20807 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20808 sub $7,%_ASM_BX
20809 cmp %_ASM_BX,%_ASM_CX
20810 jae bad_put_user
20811 -4: mov %_ASM_AX,(%_ASM_CX)
20812 +
20813 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20814 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20815 + cmp %_ASM_BX,%_ASM_CX
20816 + jb 1234f
20817 + xor %ebx,%ebx
20818 +1234:
20819 +#endif
20820 +
20821 +#endif
20822 +
20823 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
20824 #ifdef CONFIG_X86_32
20825 -5: movl %edx,4(%_ASM_CX)
20826 +5: __copyuser_seg movl %edx,4(_DEST)
20827 #endif
20828 xor %eax,%eax
20829 EXIT
20830 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20831 index 1cad221..de671ee 100644
20832 --- a/arch/x86/lib/rwlock.S
20833 +++ b/arch/x86/lib/rwlock.S
20834 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20835 FRAME
20836 0: LOCK_PREFIX
20837 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20838 +
20839 +#ifdef CONFIG_PAX_REFCOUNT
20840 + jno 1234f
20841 + LOCK_PREFIX
20842 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20843 + int $4
20844 +1234:
20845 + _ASM_EXTABLE(1234b, 1234b)
20846 +#endif
20847 +
20848 1: rep; nop
20849 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20850 jne 1b
20851 LOCK_PREFIX
20852 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20853 +
20854 +#ifdef CONFIG_PAX_REFCOUNT
20855 + jno 1234f
20856 + LOCK_PREFIX
20857 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20858 + int $4
20859 +1234:
20860 + _ASM_EXTABLE(1234b, 1234b)
20861 +#endif
20862 +
20863 jnz 0b
20864 ENDFRAME
20865 + pax_force_retaddr
20866 ret
20867 CFI_ENDPROC
20868 END(__write_lock_failed)
20869 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20870 FRAME
20871 0: LOCK_PREFIX
20872 READ_LOCK_SIZE(inc) (%__lock_ptr)
20873 +
20874 +#ifdef CONFIG_PAX_REFCOUNT
20875 + jno 1234f
20876 + LOCK_PREFIX
20877 + READ_LOCK_SIZE(dec) (%__lock_ptr)
20878 + int $4
20879 +1234:
20880 + _ASM_EXTABLE(1234b, 1234b)
20881 +#endif
20882 +
20883 1: rep; nop
20884 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20885 js 1b
20886 LOCK_PREFIX
20887 READ_LOCK_SIZE(dec) (%__lock_ptr)
20888 +
20889 +#ifdef CONFIG_PAX_REFCOUNT
20890 + jno 1234f
20891 + LOCK_PREFIX
20892 + READ_LOCK_SIZE(inc) (%__lock_ptr)
20893 + int $4
20894 +1234:
20895 + _ASM_EXTABLE(1234b, 1234b)
20896 +#endif
20897 +
20898 js 0b
20899 ENDFRAME
20900 + pax_force_retaddr
20901 ret
20902 CFI_ENDPROC
20903 END(__read_lock_failed)
20904 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20905 index 5dff5f0..cadebf4 100644
20906 --- a/arch/x86/lib/rwsem.S
20907 +++ b/arch/x86/lib/rwsem.S
20908 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20909 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20910 CFI_RESTORE __ASM_REG(dx)
20911 restore_common_regs
20912 + pax_force_retaddr
20913 ret
20914 CFI_ENDPROC
20915 ENDPROC(call_rwsem_down_read_failed)
20916 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20917 movq %rax,%rdi
20918 call rwsem_down_write_failed
20919 restore_common_regs
20920 + pax_force_retaddr
20921 ret
20922 CFI_ENDPROC
20923 ENDPROC(call_rwsem_down_write_failed)
20924 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20925 movq %rax,%rdi
20926 call rwsem_wake
20927 restore_common_regs
20928 -1: ret
20929 +1: pax_force_retaddr
20930 + ret
20931 CFI_ENDPROC
20932 ENDPROC(call_rwsem_wake)
20933
20934 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20935 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20936 CFI_RESTORE __ASM_REG(dx)
20937 restore_common_regs
20938 + pax_force_retaddr
20939 ret
20940 CFI_ENDPROC
20941 ENDPROC(call_rwsem_downgrade_wake)
20942 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20943 index a63efd6..ccecad8 100644
20944 --- a/arch/x86/lib/thunk_64.S
20945 +++ b/arch/x86/lib/thunk_64.S
20946 @@ -8,6 +8,7 @@
20947 #include <linux/linkage.h>
20948 #include <asm/dwarf2.h>
20949 #include <asm/calling.h>
20950 +#include <asm/alternative-asm.h>
20951
20952 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20953 .macro THUNK name, func, put_ret_addr_in_rdi=0
20954 @@ -41,5 +42,6 @@
20955 SAVE_ARGS
20956 restore:
20957 RESTORE_ARGS
20958 + pax_force_retaddr
20959 ret
20960 CFI_ENDPROC
20961 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20962 index e218d5d..35679b4 100644
20963 --- a/arch/x86/lib/usercopy_32.c
20964 +++ b/arch/x86/lib/usercopy_32.c
20965 @@ -43,7 +43,7 @@ do { \
20966 __asm__ __volatile__( \
20967 " testl %1,%1\n" \
20968 " jz 2f\n" \
20969 - "0: lodsb\n" \
20970 + "0: "__copyuser_seg"lodsb\n" \
20971 " stosb\n" \
20972 " testb %%al,%%al\n" \
20973 " jz 1f\n" \
20974 @@ -128,10 +128,12 @@ do { \
20975 int __d0; \
20976 might_fault(); \
20977 __asm__ __volatile__( \
20978 + __COPYUSER_SET_ES \
20979 "0: rep; stosl\n" \
20980 " movl %2,%0\n" \
20981 "1: rep; stosb\n" \
20982 "2:\n" \
20983 + __COPYUSER_RESTORE_ES \
20984 ".section .fixup,\"ax\"\n" \
20985 "3: lea 0(%2,%0,4),%0\n" \
20986 " jmp 2b\n" \
20987 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20988 might_fault();
20989
20990 __asm__ __volatile__(
20991 + __COPYUSER_SET_ES
20992 " testl %0, %0\n"
20993 " jz 3f\n"
20994 " andl %0,%%ecx\n"
20995 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20996 " subl %%ecx,%0\n"
20997 " addl %0,%%eax\n"
20998 "1:\n"
20999 + __COPYUSER_RESTORE_ES
21000 ".section .fixup,\"ax\"\n"
21001 "2: xorl %%eax,%%eax\n"
21002 " jmp 1b\n"
21003 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
21004
21005 #ifdef CONFIG_X86_INTEL_USERCOPY
21006 static unsigned long
21007 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
21008 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
21009 {
21010 int d0, d1;
21011 __asm__ __volatile__(
21012 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
21013 " .align 2,0x90\n"
21014 "3: movl 0(%4), %%eax\n"
21015 "4: movl 4(%4), %%edx\n"
21016 - "5: movl %%eax, 0(%3)\n"
21017 - "6: movl %%edx, 4(%3)\n"
21018 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
21019 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
21020 "7: movl 8(%4), %%eax\n"
21021 "8: movl 12(%4),%%edx\n"
21022 - "9: movl %%eax, 8(%3)\n"
21023 - "10: movl %%edx, 12(%3)\n"
21024 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
21025 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
21026 "11: movl 16(%4), %%eax\n"
21027 "12: movl 20(%4), %%edx\n"
21028 - "13: movl %%eax, 16(%3)\n"
21029 - "14: movl %%edx, 20(%3)\n"
21030 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
21031 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
21032 "15: movl 24(%4), %%eax\n"
21033 "16: movl 28(%4), %%edx\n"
21034 - "17: movl %%eax, 24(%3)\n"
21035 - "18: movl %%edx, 28(%3)\n"
21036 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
21037 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
21038 "19: movl 32(%4), %%eax\n"
21039 "20: movl 36(%4), %%edx\n"
21040 - "21: movl %%eax, 32(%3)\n"
21041 - "22: movl %%edx, 36(%3)\n"
21042 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
21043 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
21044 "23: movl 40(%4), %%eax\n"
21045 "24: movl 44(%4), %%edx\n"
21046 - "25: movl %%eax, 40(%3)\n"
21047 - "26: movl %%edx, 44(%3)\n"
21048 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
21049 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
21050 "27: movl 48(%4), %%eax\n"
21051 "28: movl 52(%4), %%edx\n"
21052 - "29: movl %%eax, 48(%3)\n"
21053 - "30: movl %%edx, 52(%3)\n"
21054 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
21055 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
21056 "31: movl 56(%4), %%eax\n"
21057 "32: movl 60(%4), %%edx\n"
21058 - "33: movl %%eax, 56(%3)\n"
21059 - "34: movl %%edx, 60(%3)\n"
21060 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
21061 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
21062 " addl $-64, %0\n"
21063 " addl $64, %4\n"
21064 " addl $64, %3\n"
21065 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
21066 " shrl $2, %0\n"
21067 " andl $3, %%eax\n"
21068 " cld\n"
21069 + __COPYUSER_SET_ES
21070 "99: rep; movsl\n"
21071 "36: movl %%eax, %0\n"
21072 "37: rep; movsb\n"
21073 "100:\n"
21074 + __COPYUSER_RESTORE_ES
21075 + ".section .fixup,\"ax\"\n"
21076 + "101: lea 0(%%eax,%0,4),%0\n"
21077 + " jmp 100b\n"
21078 + ".previous\n"
21079 + ".section __ex_table,\"a\"\n"
21080 + " .align 4\n"
21081 + " .long 1b,100b\n"
21082 + " .long 2b,100b\n"
21083 + " .long 3b,100b\n"
21084 + " .long 4b,100b\n"
21085 + " .long 5b,100b\n"
21086 + " .long 6b,100b\n"
21087 + " .long 7b,100b\n"
21088 + " .long 8b,100b\n"
21089 + " .long 9b,100b\n"
21090 + " .long 10b,100b\n"
21091 + " .long 11b,100b\n"
21092 + " .long 12b,100b\n"
21093 + " .long 13b,100b\n"
21094 + " .long 14b,100b\n"
21095 + " .long 15b,100b\n"
21096 + " .long 16b,100b\n"
21097 + " .long 17b,100b\n"
21098 + " .long 18b,100b\n"
21099 + " .long 19b,100b\n"
21100 + " .long 20b,100b\n"
21101 + " .long 21b,100b\n"
21102 + " .long 22b,100b\n"
21103 + " .long 23b,100b\n"
21104 + " .long 24b,100b\n"
21105 + " .long 25b,100b\n"
21106 + " .long 26b,100b\n"
21107 + " .long 27b,100b\n"
21108 + " .long 28b,100b\n"
21109 + " .long 29b,100b\n"
21110 + " .long 30b,100b\n"
21111 + " .long 31b,100b\n"
21112 + " .long 32b,100b\n"
21113 + " .long 33b,100b\n"
21114 + " .long 34b,100b\n"
21115 + " .long 35b,100b\n"
21116 + " .long 36b,100b\n"
21117 + " .long 37b,100b\n"
21118 + " .long 99b,101b\n"
21119 + ".previous"
21120 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
21121 + : "1"(to), "2"(from), "0"(size)
21122 + : "eax", "edx", "memory");
21123 + return size;
21124 +}
21125 +
21126 +static unsigned long
21127 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
21128 +{
21129 + int d0, d1;
21130 + __asm__ __volatile__(
21131 + " .align 2,0x90\n"
21132 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
21133 + " cmpl $67, %0\n"
21134 + " jbe 3f\n"
21135 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
21136 + " .align 2,0x90\n"
21137 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
21138 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
21139 + "5: movl %%eax, 0(%3)\n"
21140 + "6: movl %%edx, 4(%3)\n"
21141 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
21142 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
21143 + "9: movl %%eax, 8(%3)\n"
21144 + "10: movl %%edx, 12(%3)\n"
21145 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
21146 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
21147 + "13: movl %%eax, 16(%3)\n"
21148 + "14: movl %%edx, 20(%3)\n"
21149 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
21150 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
21151 + "17: movl %%eax, 24(%3)\n"
21152 + "18: movl %%edx, 28(%3)\n"
21153 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
21154 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
21155 + "21: movl %%eax, 32(%3)\n"
21156 + "22: movl %%edx, 36(%3)\n"
21157 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
21158 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
21159 + "25: movl %%eax, 40(%3)\n"
21160 + "26: movl %%edx, 44(%3)\n"
21161 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
21162 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
21163 + "29: movl %%eax, 48(%3)\n"
21164 + "30: movl %%edx, 52(%3)\n"
21165 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
21166 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
21167 + "33: movl %%eax, 56(%3)\n"
21168 + "34: movl %%edx, 60(%3)\n"
21169 + " addl $-64, %0\n"
21170 + " addl $64, %4\n"
21171 + " addl $64, %3\n"
21172 + " cmpl $63, %0\n"
21173 + " ja 1b\n"
21174 + "35: movl %0, %%eax\n"
21175 + " shrl $2, %0\n"
21176 + " andl $3, %%eax\n"
21177 + " cld\n"
21178 + "99: rep; "__copyuser_seg" movsl\n"
21179 + "36: movl %%eax, %0\n"
21180 + "37: rep; "__copyuser_seg" movsb\n"
21181 + "100:\n"
21182 ".section .fixup,\"ax\"\n"
21183 "101: lea 0(%%eax,%0,4),%0\n"
21184 " jmp 100b\n"
21185 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21186 int d0, d1;
21187 __asm__ __volatile__(
21188 " .align 2,0x90\n"
21189 - "0: movl 32(%4), %%eax\n"
21190 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21191 " cmpl $67, %0\n"
21192 " jbe 2f\n"
21193 - "1: movl 64(%4), %%eax\n"
21194 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21195 " .align 2,0x90\n"
21196 - "2: movl 0(%4), %%eax\n"
21197 - "21: movl 4(%4), %%edx\n"
21198 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21199 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21200 " movl %%eax, 0(%3)\n"
21201 " movl %%edx, 4(%3)\n"
21202 - "3: movl 8(%4), %%eax\n"
21203 - "31: movl 12(%4),%%edx\n"
21204 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21205 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21206 " movl %%eax, 8(%3)\n"
21207 " movl %%edx, 12(%3)\n"
21208 - "4: movl 16(%4), %%eax\n"
21209 - "41: movl 20(%4), %%edx\n"
21210 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21211 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21212 " movl %%eax, 16(%3)\n"
21213 " movl %%edx, 20(%3)\n"
21214 - "10: movl 24(%4), %%eax\n"
21215 - "51: movl 28(%4), %%edx\n"
21216 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21217 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21218 " movl %%eax, 24(%3)\n"
21219 " movl %%edx, 28(%3)\n"
21220 - "11: movl 32(%4), %%eax\n"
21221 - "61: movl 36(%4), %%edx\n"
21222 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21223 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21224 " movl %%eax, 32(%3)\n"
21225 " movl %%edx, 36(%3)\n"
21226 - "12: movl 40(%4), %%eax\n"
21227 - "71: movl 44(%4), %%edx\n"
21228 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21229 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21230 " movl %%eax, 40(%3)\n"
21231 " movl %%edx, 44(%3)\n"
21232 - "13: movl 48(%4), %%eax\n"
21233 - "81: movl 52(%4), %%edx\n"
21234 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21235 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21236 " movl %%eax, 48(%3)\n"
21237 " movl %%edx, 52(%3)\n"
21238 - "14: movl 56(%4), %%eax\n"
21239 - "91: movl 60(%4), %%edx\n"
21240 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21241 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21242 " movl %%eax, 56(%3)\n"
21243 " movl %%edx, 60(%3)\n"
21244 " addl $-64, %0\n"
21245 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21246 " shrl $2, %0\n"
21247 " andl $3, %%eax\n"
21248 " cld\n"
21249 - "6: rep; movsl\n"
21250 + "6: rep; "__copyuser_seg" movsl\n"
21251 " movl %%eax,%0\n"
21252 - "7: rep; movsb\n"
21253 + "7: rep; "__copyuser_seg" movsb\n"
21254 "8:\n"
21255 ".section .fixup,\"ax\"\n"
21256 "9: lea 0(%%eax,%0,4),%0\n"
21257 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21258
21259 __asm__ __volatile__(
21260 " .align 2,0x90\n"
21261 - "0: movl 32(%4), %%eax\n"
21262 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21263 " cmpl $67, %0\n"
21264 " jbe 2f\n"
21265 - "1: movl 64(%4), %%eax\n"
21266 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21267 " .align 2,0x90\n"
21268 - "2: movl 0(%4), %%eax\n"
21269 - "21: movl 4(%4), %%edx\n"
21270 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21271 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21272 " movnti %%eax, 0(%3)\n"
21273 " movnti %%edx, 4(%3)\n"
21274 - "3: movl 8(%4), %%eax\n"
21275 - "31: movl 12(%4),%%edx\n"
21276 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21277 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21278 " movnti %%eax, 8(%3)\n"
21279 " movnti %%edx, 12(%3)\n"
21280 - "4: movl 16(%4), %%eax\n"
21281 - "41: movl 20(%4), %%edx\n"
21282 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21283 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21284 " movnti %%eax, 16(%3)\n"
21285 " movnti %%edx, 20(%3)\n"
21286 - "10: movl 24(%4), %%eax\n"
21287 - "51: movl 28(%4), %%edx\n"
21288 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21289 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21290 " movnti %%eax, 24(%3)\n"
21291 " movnti %%edx, 28(%3)\n"
21292 - "11: movl 32(%4), %%eax\n"
21293 - "61: movl 36(%4), %%edx\n"
21294 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21295 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21296 " movnti %%eax, 32(%3)\n"
21297 " movnti %%edx, 36(%3)\n"
21298 - "12: movl 40(%4), %%eax\n"
21299 - "71: movl 44(%4), %%edx\n"
21300 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21301 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21302 " movnti %%eax, 40(%3)\n"
21303 " movnti %%edx, 44(%3)\n"
21304 - "13: movl 48(%4), %%eax\n"
21305 - "81: movl 52(%4), %%edx\n"
21306 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21307 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21308 " movnti %%eax, 48(%3)\n"
21309 " movnti %%edx, 52(%3)\n"
21310 - "14: movl 56(%4), %%eax\n"
21311 - "91: movl 60(%4), %%edx\n"
21312 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21313 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21314 " movnti %%eax, 56(%3)\n"
21315 " movnti %%edx, 60(%3)\n"
21316 " addl $-64, %0\n"
21317 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21318 " shrl $2, %0\n"
21319 " andl $3, %%eax\n"
21320 " cld\n"
21321 - "6: rep; movsl\n"
21322 + "6: rep; "__copyuser_seg" movsl\n"
21323 " movl %%eax,%0\n"
21324 - "7: rep; movsb\n"
21325 + "7: rep; "__copyuser_seg" movsb\n"
21326 "8:\n"
21327 ".section .fixup,\"ax\"\n"
21328 "9: lea 0(%%eax,%0,4),%0\n"
21329 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21330
21331 __asm__ __volatile__(
21332 " .align 2,0x90\n"
21333 - "0: movl 32(%4), %%eax\n"
21334 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21335 " cmpl $67, %0\n"
21336 " jbe 2f\n"
21337 - "1: movl 64(%4), %%eax\n"
21338 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21339 " .align 2,0x90\n"
21340 - "2: movl 0(%4), %%eax\n"
21341 - "21: movl 4(%4), %%edx\n"
21342 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21343 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21344 " movnti %%eax, 0(%3)\n"
21345 " movnti %%edx, 4(%3)\n"
21346 - "3: movl 8(%4), %%eax\n"
21347 - "31: movl 12(%4),%%edx\n"
21348 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21349 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21350 " movnti %%eax, 8(%3)\n"
21351 " movnti %%edx, 12(%3)\n"
21352 - "4: movl 16(%4), %%eax\n"
21353 - "41: movl 20(%4), %%edx\n"
21354 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21355 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21356 " movnti %%eax, 16(%3)\n"
21357 " movnti %%edx, 20(%3)\n"
21358 - "10: movl 24(%4), %%eax\n"
21359 - "51: movl 28(%4), %%edx\n"
21360 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21361 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21362 " movnti %%eax, 24(%3)\n"
21363 " movnti %%edx, 28(%3)\n"
21364 - "11: movl 32(%4), %%eax\n"
21365 - "61: movl 36(%4), %%edx\n"
21366 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21367 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21368 " movnti %%eax, 32(%3)\n"
21369 " movnti %%edx, 36(%3)\n"
21370 - "12: movl 40(%4), %%eax\n"
21371 - "71: movl 44(%4), %%edx\n"
21372 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21373 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21374 " movnti %%eax, 40(%3)\n"
21375 " movnti %%edx, 44(%3)\n"
21376 - "13: movl 48(%4), %%eax\n"
21377 - "81: movl 52(%4), %%edx\n"
21378 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21379 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21380 " movnti %%eax, 48(%3)\n"
21381 " movnti %%edx, 52(%3)\n"
21382 - "14: movl 56(%4), %%eax\n"
21383 - "91: movl 60(%4), %%edx\n"
21384 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21385 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21386 " movnti %%eax, 56(%3)\n"
21387 " movnti %%edx, 60(%3)\n"
21388 " addl $-64, %0\n"
21389 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21390 " shrl $2, %0\n"
21391 " andl $3, %%eax\n"
21392 " cld\n"
21393 - "6: rep; movsl\n"
21394 + "6: rep; "__copyuser_seg" movsl\n"
21395 " movl %%eax,%0\n"
21396 - "7: rep; movsb\n"
21397 + "7: rep; "__copyuser_seg" movsb\n"
21398 "8:\n"
21399 ".section .fixup,\"ax\"\n"
21400 "9: lea 0(%%eax,%0,4),%0\n"
21401 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21402 */
21403 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21404 unsigned long size);
21405 -unsigned long __copy_user_intel(void __user *to, const void *from,
21406 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21407 + unsigned long size);
21408 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21409 unsigned long size);
21410 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21411 const void __user *from, unsigned long size);
21412 #endif /* CONFIG_X86_INTEL_USERCOPY */
21413
21414 /* Generic arbitrary sized copy. */
21415 -#define __copy_user(to, from, size) \
21416 +#define __copy_user(to, from, size, prefix, set, restore) \
21417 do { \
21418 int __d0, __d1, __d2; \
21419 __asm__ __volatile__( \
21420 + set \
21421 " cmp $7,%0\n" \
21422 " jbe 1f\n" \
21423 " movl %1,%0\n" \
21424 " negl %0\n" \
21425 " andl $7,%0\n" \
21426 " subl %0,%3\n" \
21427 - "4: rep; movsb\n" \
21428 + "4: rep; "prefix"movsb\n" \
21429 " movl %3,%0\n" \
21430 " shrl $2,%0\n" \
21431 " andl $3,%3\n" \
21432 " .align 2,0x90\n" \
21433 - "0: rep; movsl\n" \
21434 + "0: rep; "prefix"movsl\n" \
21435 " movl %3,%0\n" \
21436 - "1: rep; movsb\n" \
21437 + "1: rep; "prefix"movsb\n" \
21438 "2:\n" \
21439 + restore \
21440 ".section .fixup,\"ax\"\n" \
21441 "5: addl %3,%0\n" \
21442 " jmp 2b\n" \
21443 @@ -682,14 +799,14 @@ do { \
21444 " negl %0\n" \
21445 " andl $7,%0\n" \
21446 " subl %0,%3\n" \
21447 - "4: rep; movsb\n" \
21448 + "4: rep; "__copyuser_seg"movsb\n" \
21449 " movl %3,%0\n" \
21450 " shrl $2,%0\n" \
21451 " andl $3,%3\n" \
21452 " .align 2,0x90\n" \
21453 - "0: rep; movsl\n" \
21454 + "0: rep; "__copyuser_seg"movsl\n" \
21455 " movl %3,%0\n" \
21456 - "1: rep; movsb\n" \
21457 + "1: rep; "__copyuser_seg"movsb\n" \
21458 "2:\n" \
21459 ".section .fixup,\"ax\"\n" \
21460 "5: addl %3,%0\n" \
21461 @@ -775,9 +892,9 @@ survive:
21462 }
21463 #endif
21464 if (movsl_is_ok(to, from, n))
21465 - __copy_user(to, from, n);
21466 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21467 else
21468 - n = __copy_user_intel(to, from, n);
21469 + n = __generic_copy_to_user_intel(to, from, n);
21470 return n;
21471 }
21472 EXPORT_SYMBOL(__copy_to_user_ll);
21473 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21474 unsigned long n)
21475 {
21476 if (movsl_is_ok(to, from, n))
21477 - __copy_user(to, from, n);
21478 + __copy_user(to, from, n, __copyuser_seg, "", "");
21479 else
21480 - n = __copy_user_intel((void __user *)to,
21481 - (const void *)from, n);
21482 + n = __generic_copy_from_user_intel(to, from, n);
21483 return n;
21484 }
21485 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21486 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21487 if (n > 64 && cpu_has_xmm2)
21488 n = __copy_user_intel_nocache(to, from, n);
21489 else
21490 - __copy_user(to, from, n);
21491 + __copy_user(to, from, n, __copyuser_seg, "", "");
21492 #else
21493 - __copy_user(to, from, n);
21494 + __copy_user(to, from, n, __copyuser_seg, "", "");
21495 #endif
21496 return n;
21497 }
21498 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21499
21500 -/**
21501 - * copy_to_user: - Copy a block of data into user space.
21502 - * @to: Destination address, in user space.
21503 - * @from: Source address, in kernel space.
21504 - * @n: Number of bytes to copy.
21505 - *
21506 - * Context: User context only. This function may sleep.
21507 - *
21508 - * Copy data from kernel space to user space.
21509 - *
21510 - * Returns number of bytes that could not be copied.
21511 - * On success, this will be zero.
21512 - */
21513 -unsigned long
21514 -copy_to_user(void __user *to, const void *from, unsigned long n)
21515 -{
21516 - if (access_ok(VERIFY_WRITE, to, n))
21517 - n = __copy_to_user(to, from, n);
21518 - return n;
21519 -}
21520 -EXPORT_SYMBOL(copy_to_user);
21521 -
21522 -/**
21523 - * copy_from_user: - Copy a block of data from user space.
21524 - * @to: Destination address, in kernel space.
21525 - * @from: Source address, in user space.
21526 - * @n: Number of bytes to copy.
21527 - *
21528 - * Context: User context only. This function may sleep.
21529 - *
21530 - * Copy data from user space to kernel space.
21531 - *
21532 - * Returns number of bytes that could not be copied.
21533 - * On success, this will be zero.
21534 - *
21535 - * If some data could not be copied, this function will pad the copied
21536 - * data to the requested size using zero bytes.
21537 - */
21538 -unsigned long
21539 -_copy_from_user(void *to, const void __user *from, unsigned long n)
21540 -{
21541 - if (access_ok(VERIFY_READ, from, n))
21542 - n = __copy_from_user(to, from, n);
21543 - else
21544 - memset(to, 0, n);
21545 - return n;
21546 -}
21547 -EXPORT_SYMBOL(_copy_from_user);
21548 -
21549 void copy_from_user_overflow(void)
21550 {
21551 WARN(1, "Buffer overflow detected!\n");
21552 }
21553 EXPORT_SYMBOL(copy_from_user_overflow);
21554 +
21555 +void copy_to_user_overflow(void)
21556 +{
21557 + WARN(1, "Buffer overflow detected!\n");
21558 +}
21559 +EXPORT_SYMBOL(copy_to_user_overflow);
21560 +
21561 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21562 +void __set_fs(mm_segment_t x)
21563 +{
21564 + switch (x.seg) {
21565 + case 0:
21566 + loadsegment(gs, 0);
21567 + break;
21568 + case TASK_SIZE_MAX:
21569 + loadsegment(gs, __USER_DS);
21570 + break;
21571 + case -1UL:
21572 + loadsegment(gs, __KERNEL_DS);
21573 + break;
21574 + default:
21575 + BUG();
21576 + }
21577 + return;
21578 +}
21579 +EXPORT_SYMBOL(__set_fs);
21580 +
21581 +void set_fs(mm_segment_t x)
21582 +{
21583 + current_thread_info()->addr_limit = x;
21584 + __set_fs(x);
21585 +}
21586 +EXPORT_SYMBOL(set_fs);
21587 +#endif
21588 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21589 index b7c2849..8633ad8 100644
21590 --- a/arch/x86/lib/usercopy_64.c
21591 +++ b/arch/x86/lib/usercopy_64.c
21592 @@ -42,6 +42,12 @@ long
21593 __strncpy_from_user(char *dst, const char __user *src, long count)
21594 {
21595 long res;
21596 +
21597 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21598 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21599 + src += PAX_USER_SHADOW_BASE;
21600 +#endif
21601 +
21602 __do_strncpy_from_user(dst, src, count, res);
21603 return res;
21604 }
21605 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21606 {
21607 long __d0;
21608 might_fault();
21609 +
21610 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21611 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21612 + addr += PAX_USER_SHADOW_BASE;
21613 +#endif
21614 +
21615 /* no memory constraint because it doesn't change any memory gcc knows
21616 about */
21617 asm volatile(
21618 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21619 }
21620 EXPORT_SYMBOL(strlen_user);
21621
21622 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21623 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21624 {
21625 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21626 - return copy_user_generic((__force void *)to, (__force void *)from, len);
21627 - }
21628 - return len;
21629 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21630 +
21631 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21632 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21633 + to += PAX_USER_SHADOW_BASE;
21634 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21635 + from += PAX_USER_SHADOW_BASE;
21636 +#endif
21637 +
21638 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21639 + }
21640 + return len;
21641 }
21642 EXPORT_SYMBOL(copy_in_user);
21643
21644 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21645 * it is not necessary to optimize tail handling.
21646 */
21647 unsigned long
21648 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21649 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21650 {
21651 char c;
21652 unsigned zero_len;
21653 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21654 index d0474ad..36e9257 100644
21655 --- a/arch/x86/mm/extable.c
21656 +++ b/arch/x86/mm/extable.c
21657 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21658 const struct exception_table_entry *fixup;
21659
21660 #ifdef CONFIG_PNPBIOS
21661 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21662 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21663 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21664 extern u32 pnp_bios_is_utter_crap;
21665 pnp_bios_is_utter_crap = 1;
21666 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21667 index 5db0490..2ddce45 100644
21668 --- a/arch/x86/mm/fault.c
21669 +++ b/arch/x86/mm/fault.c
21670 @@ -13,11 +13,18 @@
21671 #include <linux/perf_event.h> /* perf_sw_event */
21672 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21673 #include <linux/prefetch.h> /* prefetchw */
21674 +#include <linux/unistd.h>
21675 +#include <linux/compiler.h>
21676
21677 #include <asm/traps.h> /* dotraplinkage, ... */
21678 #include <asm/pgalloc.h> /* pgd_*(), ... */
21679 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21680 #include <asm/fixmap.h> /* VSYSCALL_START */
21681 +#include <asm/tlbflush.h>
21682 +
21683 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21684 +#include <asm/stacktrace.h>
21685 +#endif
21686
21687 /*
21688 * Page fault error code bits:
21689 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21690 int ret = 0;
21691
21692 /* kprobe_running() needs smp_processor_id() */
21693 - if (kprobes_built_in() && !user_mode_vm(regs)) {
21694 + if (kprobes_built_in() && !user_mode(regs)) {
21695 preempt_disable();
21696 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21697 ret = 1;
21698 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21699 return !instr_lo || (instr_lo>>1) == 1;
21700 case 0x00:
21701 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21702 - if (probe_kernel_address(instr, opcode))
21703 + if (user_mode(regs)) {
21704 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21705 + return 0;
21706 + } else if (probe_kernel_address(instr, opcode))
21707 return 0;
21708
21709 *prefetch = (instr_lo == 0xF) &&
21710 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21711 while (instr < max_instr) {
21712 unsigned char opcode;
21713
21714 - if (probe_kernel_address(instr, opcode))
21715 + if (user_mode(regs)) {
21716 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21717 + break;
21718 + } else if (probe_kernel_address(instr, opcode))
21719 break;
21720
21721 instr++;
21722 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21723 force_sig_info(si_signo, &info, tsk);
21724 }
21725
21726 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21727 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21728 +#endif
21729 +
21730 +#ifdef CONFIG_PAX_EMUTRAMP
21731 +static int pax_handle_fetch_fault(struct pt_regs *regs);
21732 +#endif
21733 +
21734 +#ifdef CONFIG_PAX_PAGEEXEC
21735 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21736 +{
21737 + pgd_t *pgd;
21738 + pud_t *pud;
21739 + pmd_t *pmd;
21740 +
21741 + pgd = pgd_offset(mm, address);
21742 + if (!pgd_present(*pgd))
21743 + return NULL;
21744 + pud = pud_offset(pgd, address);
21745 + if (!pud_present(*pud))
21746 + return NULL;
21747 + pmd = pmd_offset(pud, address);
21748 + if (!pmd_present(*pmd))
21749 + return NULL;
21750 + return pmd;
21751 +}
21752 +#endif
21753 +
21754 DEFINE_SPINLOCK(pgd_lock);
21755 LIST_HEAD(pgd_list);
21756
21757 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21758 for (address = VMALLOC_START & PMD_MASK;
21759 address >= TASK_SIZE && address < FIXADDR_TOP;
21760 address += PMD_SIZE) {
21761 +
21762 +#ifdef CONFIG_PAX_PER_CPU_PGD
21763 + unsigned long cpu;
21764 +#else
21765 struct page *page;
21766 +#endif
21767
21768 spin_lock(&pgd_lock);
21769 +
21770 +#ifdef CONFIG_PAX_PER_CPU_PGD
21771 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
21772 + pgd_t *pgd = get_cpu_pgd(cpu);
21773 + pmd_t *ret;
21774 +#else
21775 list_for_each_entry(page, &pgd_list, lru) {
21776 + pgd_t *pgd = page_address(page);
21777 spinlock_t *pgt_lock;
21778 pmd_t *ret;
21779
21780 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21781 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21782
21783 spin_lock(pgt_lock);
21784 - ret = vmalloc_sync_one(page_address(page), address);
21785 +#endif
21786 +
21787 + ret = vmalloc_sync_one(pgd, address);
21788 +
21789 +#ifndef CONFIG_PAX_PER_CPU_PGD
21790 spin_unlock(pgt_lock);
21791 +#endif
21792
21793 if (!ret)
21794 break;
21795 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21796 * an interrupt in the middle of a task switch..
21797 */
21798 pgd_paddr = read_cr3();
21799 +
21800 +#ifdef CONFIG_PAX_PER_CPU_PGD
21801 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21802 +#endif
21803 +
21804 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21805 if (!pmd_k)
21806 return -1;
21807 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21808 * happen within a race in page table update. In the later
21809 * case just flush:
21810 */
21811 +
21812 +#ifdef CONFIG_PAX_PER_CPU_PGD
21813 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21814 + pgd = pgd_offset_cpu(smp_processor_id(), address);
21815 +#else
21816 pgd = pgd_offset(current->active_mm, address);
21817 +#endif
21818 +
21819 pgd_ref = pgd_offset_k(address);
21820 if (pgd_none(*pgd_ref))
21821 return -1;
21822 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21823 static int is_errata100(struct pt_regs *regs, unsigned long address)
21824 {
21825 #ifdef CONFIG_X86_64
21826 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21827 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21828 return 1;
21829 #endif
21830 return 0;
21831 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21832 }
21833
21834 static const char nx_warning[] = KERN_CRIT
21835 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21836 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21837
21838 static void
21839 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21840 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21841 if (!oops_may_print())
21842 return;
21843
21844 - if (error_code & PF_INSTR) {
21845 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21846 unsigned int level;
21847
21848 pte_t *pte = lookup_address(address, &level);
21849
21850 if (pte && pte_present(*pte) && !pte_exec(*pte))
21851 - printk(nx_warning, current_uid());
21852 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21853 }
21854
21855 +#ifdef CONFIG_PAX_KERNEXEC
21856 + if (init_mm.start_code <= address && address < init_mm.end_code) {
21857 + if (current->signal->curr_ip)
21858 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21859 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21860 + else
21861 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21862 + current->comm, task_pid_nr(current), current_uid(), current_euid());
21863 + }
21864 +#endif
21865 +
21866 printk(KERN_ALERT "BUG: unable to handle kernel ");
21867 if (address < PAGE_SIZE)
21868 printk(KERN_CONT "NULL pointer dereference");
21869 @@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21870 }
21871 #endif
21872
21873 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21874 + if (pax_is_fetch_fault(regs, error_code, address)) {
21875 +
21876 +#ifdef CONFIG_PAX_EMUTRAMP
21877 + switch (pax_handle_fetch_fault(regs)) {
21878 + case 2:
21879 + return;
21880 + }
21881 +#endif
21882 +
21883 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21884 + do_group_exit(SIGKILL);
21885 + }
21886 +#endif
21887 +
21888 if (unlikely(show_unhandled_signals))
21889 show_signal_msg(regs, error_code, address, tsk);
21890
21891 @@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21892 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21893 printk(KERN_ERR
21894 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21895 - tsk->comm, tsk->pid, address);
21896 + tsk->comm, task_pid_nr(tsk), address);
21897 code = BUS_MCEERR_AR;
21898 }
21899 #endif
21900 @@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21901 return 1;
21902 }
21903
21904 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21905 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21906 +{
21907 + pte_t *pte;
21908 + pmd_t *pmd;
21909 + spinlock_t *ptl;
21910 + unsigned char pte_mask;
21911 +
21912 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21913 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
21914 + return 0;
21915 +
21916 + /* PaX: it's our fault, let's handle it if we can */
21917 +
21918 + /* PaX: take a look at read faults before acquiring any locks */
21919 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21920 + /* instruction fetch attempt from a protected page in user mode */
21921 + up_read(&mm->mmap_sem);
21922 +
21923 +#ifdef CONFIG_PAX_EMUTRAMP
21924 + switch (pax_handle_fetch_fault(regs)) {
21925 + case 2:
21926 + return 1;
21927 + }
21928 +#endif
21929 +
21930 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21931 + do_group_exit(SIGKILL);
21932 + }
21933 +
21934 + pmd = pax_get_pmd(mm, address);
21935 + if (unlikely(!pmd))
21936 + return 0;
21937 +
21938 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21939 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21940 + pte_unmap_unlock(pte, ptl);
21941 + return 0;
21942 + }
21943 +
21944 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21945 + /* write attempt to a protected page in user mode */
21946 + pte_unmap_unlock(pte, ptl);
21947 + return 0;
21948 + }
21949 +
21950 +#ifdef CONFIG_SMP
21951 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21952 +#else
21953 + if (likely(address > get_limit(regs->cs)))
21954 +#endif
21955 + {
21956 + set_pte(pte, pte_mkread(*pte));
21957 + __flush_tlb_one(address);
21958 + pte_unmap_unlock(pte, ptl);
21959 + up_read(&mm->mmap_sem);
21960 + return 1;
21961 + }
21962 +
21963 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21964 +
21965 + /*
21966 + * PaX: fill DTLB with user rights and retry
21967 + */
21968 + __asm__ __volatile__ (
21969 + "orb %2,(%1)\n"
21970 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21971 +/*
21972 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21973 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21974 + * page fault when examined during a TLB load attempt. this is true not only
21975 + * for PTEs holding a non-present entry but also present entries that will
21976 + * raise a page fault (such as those set up by PaX, or the copy-on-write
21977 + * mechanism). in effect it means that we do *not* need to flush the TLBs
21978 + * for our target pages since their PTEs are simply not in the TLBs at all.
21979 +
21980 + * the best thing in omitting it is that we gain around 15-20% speed in the
21981 + * fast path of the page fault handler and can get rid of tracing since we
21982 + * can no longer flush unintended entries.
21983 + */
21984 + "invlpg (%0)\n"
21985 +#endif
21986 + __copyuser_seg"testb $0,(%0)\n"
21987 + "xorb %3,(%1)\n"
21988 + :
21989 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21990 + : "memory", "cc");
21991 + pte_unmap_unlock(pte, ptl);
21992 + up_read(&mm->mmap_sem);
21993 + return 1;
21994 +}
21995 +#endif
21996 +
21997 /*
21998 * Handle a spurious fault caused by a stale TLB entry.
21999 *
22000 @@ -962,6 +1151,9 @@ int show_unhandled_signals = 1;
22001 static inline int
22002 access_error(unsigned long error_code, struct vm_area_struct *vma)
22003 {
22004 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
22005 + return 1;
22006 +
22007 if (error_code & PF_WRITE) {
22008 /* write, present and write, not present: */
22009 if (unlikely(!(vma->vm_flags & VM_WRITE)))
22010 @@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
22011 {
22012 struct vm_area_struct *vma;
22013 struct task_struct *tsk;
22014 - unsigned long address;
22015 struct mm_struct *mm;
22016 int fault;
22017 int write = error_code & PF_WRITE;
22018 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
22019 (write ? FAULT_FLAG_WRITE : 0);
22020
22021 - tsk = current;
22022 - mm = tsk->mm;
22023 -
22024 /* Get the faulting address: */
22025 - address = read_cr2();
22026 + unsigned long address = read_cr2();
22027 +
22028 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22029 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
22030 + if (!search_exception_tables(regs->ip)) {
22031 + bad_area_nosemaphore(regs, error_code, address);
22032 + return;
22033 + }
22034 + if (address < PAX_USER_SHADOW_BASE) {
22035 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
22036 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
22037 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
22038 + } else
22039 + address -= PAX_USER_SHADOW_BASE;
22040 + }
22041 +#endif
22042 +
22043 + tsk = current;
22044 + mm = tsk->mm;
22045
22046 /*
22047 * Detect and handle instructions that would cause a page fault for
22048 @@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
22049 * User-mode registers count as a user access even for any
22050 * potential system fault or CPU buglet:
22051 */
22052 - if (user_mode_vm(regs)) {
22053 + if (user_mode(regs)) {
22054 local_irq_enable();
22055 error_code |= PF_USER;
22056 } else {
22057 @@ -1122,6 +1328,11 @@ retry:
22058 might_sleep();
22059 }
22060
22061 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
22062 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
22063 + return;
22064 +#endif
22065 +
22066 vma = find_vma(mm, address);
22067 if (unlikely(!vma)) {
22068 bad_area(regs, error_code, address);
22069 @@ -1133,18 +1344,24 @@ retry:
22070 bad_area(regs, error_code, address);
22071 return;
22072 }
22073 - if (error_code & PF_USER) {
22074 - /*
22075 - * Accessing the stack below %sp is always a bug.
22076 - * The large cushion allows instructions like enter
22077 - * and pusha to work. ("enter $65535, $31" pushes
22078 - * 32 pointers and then decrements %sp by 65535.)
22079 - */
22080 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
22081 - bad_area(regs, error_code, address);
22082 - return;
22083 - }
22084 + /*
22085 + * Accessing the stack below %sp is always a bug.
22086 + * The large cushion allows instructions like enter
22087 + * and pusha to work. ("enter $65535, $31" pushes
22088 + * 32 pointers and then decrements %sp by 65535.)
22089 + */
22090 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
22091 + bad_area(regs, error_code, address);
22092 + return;
22093 }
22094 +
22095 +#ifdef CONFIG_PAX_SEGMEXEC
22096 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
22097 + bad_area(regs, error_code, address);
22098 + return;
22099 + }
22100 +#endif
22101 +
22102 if (unlikely(expand_stack(vma, address))) {
22103 bad_area(regs, error_code, address);
22104 return;
22105 @@ -1199,3 +1416,292 @@ good_area:
22106
22107 up_read(&mm->mmap_sem);
22108 }
22109 +
22110 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22111 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
22112 +{
22113 + struct mm_struct *mm = current->mm;
22114 + unsigned long ip = regs->ip;
22115 +
22116 + if (v8086_mode(regs))
22117 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
22118 +
22119 +#ifdef CONFIG_PAX_PAGEEXEC
22120 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
22121 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
22122 + return true;
22123 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
22124 + return true;
22125 + return false;
22126 + }
22127 +#endif
22128 +
22129 +#ifdef CONFIG_PAX_SEGMEXEC
22130 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
22131 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
22132 + return true;
22133 + return false;
22134 + }
22135 +#endif
22136 +
22137 + return false;
22138 +}
22139 +#endif
22140 +
22141 +#ifdef CONFIG_PAX_EMUTRAMP
22142 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
22143 +{
22144 + int err;
22145 +
22146 + do { /* PaX: libffi trampoline emulation */
22147 + unsigned char mov, jmp;
22148 + unsigned int addr1, addr2;
22149 +
22150 +#ifdef CONFIG_X86_64
22151 + if ((regs->ip + 9) >> 32)
22152 + break;
22153 +#endif
22154 +
22155 + err = get_user(mov, (unsigned char __user *)regs->ip);
22156 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22157 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
22158 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22159 +
22160 + if (err)
22161 + break;
22162 +
22163 + if (mov == 0xB8 && jmp == 0xE9) {
22164 + regs->ax = addr1;
22165 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
22166 + return 2;
22167 + }
22168 + } while (0);
22169 +
22170 + do { /* PaX: gcc trampoline emulation #1 */
22171 + unsigned char mov1, mov2;
22172 + unsigned short jmp;
22173 + unsigned int addr1, addr2;
22174 +
22175 +#ifdef CONFIG_X86_64
22176 + if ((regs->ip + 11) >> 32)
22177 + break;
22178 +#endif
22179 +
22180 + err = get_user(mov1, (unsigned char __user *)regs->ip);
22181 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22182 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
22183 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22184 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
22185 +
22186 + if (err)
22187 + break;
22188 +
22189 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
22190 + regs->cx = addr1;
22191 + regs->ax = addr2;
22192 + regs->ip = addr2;
22193 + return 2;
22194 + }
22195 + } while (0);
22196 +
22197 + do { /* PaX: gcc trampoline emulation #2 */
22198 + unsigned char mov, jmp;
22199 + unsigned int addr1, addr2;
22200 +
22201 +#ifdef CONFIG_X86_64
22202 + if ((regs->ip + 9) >> 32)
22203 + break;
22204 +#endif
22205 +
22206 + err = get_user(mov, (unsigned char __user *)regs->ip);
22207 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22208 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
22209 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22210 +
22211 + if (err)
22212 + break;
22213 +
22214 + if (mov == 0xB9 && jmp == 0xE9) {
22215 + regs->cx = addr1;
22216 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
22217 + return 2;
22218 + }
22219 + } while (0);
22220 +
22221 + return 1; /* PaX in action */
22222 +}
22223 +
22224 +#ifdef CONFIG_X86_64
22225 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
22226 +{
22227 + int err;
22228 +
22229 + do { /* PaX: libffi trampoline emulation */
22230 + unsigned short mov1, mov2, jmp1;
22231 + unsigned char stcclc, jmp2;
22232 + unsigned long addr1, addr2;
22233 +
22234 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22235 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22236 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22237 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22238 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
22239 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
22240 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
22241 +
22242 + if (err)
22243 + break;
22244 +
22245 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22246 + regs->r11 = addr1;
22247 + regs->r10 = addr2;
22248 + if (stcclc == 0xF8)
22249 + regs->flags &= ~X86_EFLAGS_CF;
22250 + else
22251 + regs->flags |= X86_EFLAGS_CF;
22252 + regs->ip = addr1;
22253 + return 2;
22254 + }
22255 + } while (0);
22256 +
22257 + do { /* PaX: gcc trampoline emulation #1 */
22258 + unsigned short mov1, mov2, jmp1;
22259 + unsigned char jmp2;
22260 + unsigned int addr1;
22261 + unsigned long addr2;
22262 +
22263 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22264 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
22265 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
22266 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
22267 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
22268 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
22269 +
22270 + if (err)
22271 + break;
22272 +
22273 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22274 + regs->r11 = addr1;
22275 + regs->r10 = addr2;
22276 + regs->ip = addr1;
22277 + return 2;
22278 + }
22279 + } while (0);
22280 +
22281 + do { /* PaX: gcc trampoline emulation #2 */
22282 + unsigned short mov1, mov2, jmp1;
22283 + unsigned char jmp2;
22284 + unsigned long addr1, addr2;
22285 +
22286 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22287 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22288 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22289 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22290 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
22291 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
22292 +
22293 + if (err)
22294 + break;
22295 +
22296 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22297 + regs->r11 = addr1;
22298 + regs->r10 = addr2;
22299 + regs->ip = addr1;
22300 + return 2;
22301 + }
22302 + } while (0);
22303 +
22304 + return 1; /* PaX in action */
22305 +}
22306 +#endif
22307 +
22308 +/*
22309 + * PaX: decide what to do with offenders (regs->ip = fault address)
22310 + *
22311 + * returns 1 when task should be killed
22312 + * 2 when gcc trampoline was detected
22313 + */
22314 +static int pax_handle_fetch_fault(struct pt_regs *regs)
22315 +{
22316 + if (v8086_mode(regs))
22317 + return 1;
22318 +
22319 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
22320 + return 1;
22321 +
22322 +#ifdef CONFIG_X86_32
22323 + return pax_handle_fetch_fault_32(regs);
22324 +#else
22325 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22326 + return pax_handle_fetch_fault_32(regs);
22327 + else
22328 + return pax_handle_fetch_fault_64(regs);
22329 +#endif
22330 +}
22331 +#endif
22332 +
22333 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22334 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
22335 +{
22336 + long i;
22337 +
22338 + printk(KERN_ERR "PAX: bytes at PC: ");
22339 + for (i = 0; i < 20; i++) {
22340 + unsigned char c;
22341 + if (get_user(c, (unsigned char __force_user *)pc+i))
22342 + printk(KERN_CONT "?? ");
22343 + else
22344 + printk(KERN_CONT "%02x ", c);
22345 + }
22346 + printk("\n");
22347 +
22348 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22349 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
22350 + unsigned long c;
22351 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
22352 +#ifdef CONFIG_X86_32
22353 + printk(KERN_CONT "???????? ");
22354 +#else
22355 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22356 + printk(KERN_CONT "???????? ???????? ");
22357 + else
22358 + printk(KERN_CONT "???????????????? ");
22359 +#endif
22360 + } else {
22361 +#ifdef CONFIG_X86_64
22362 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22363 + printk(KERN_CONT "%08x ", (unsigned int)c);
22364 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22365 + } else
22366 +#endif
22367 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22368 + }
22369 + }
22370 + printk("\n");
22371 +}
22372 +#endif
22373 +
22374 +/**
22375 + * probe_kernel_write(): safely attempt to write to a location
22376 + * @dst: address to write to
22377 + * @src: pointer to the data that shall be written
22378 + * @size: size of the data chunk
22379 + *
22380 + * Safely write to address @dst from the buffer at @src. If a kernel fault
22381 + * happens, handle that and return -EFAULT.
22382 + */
22383 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22384 +{
22385 + long ret;
22386 + mm_segment_t old_fs = get_fs();
22387 +
22388 + set_fs(KERNEL_DS);
22389 + pagefault_disable();
22390 + pax_open_kernel();
22391 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22392 + pax_close_kernel();
22393 + pagefault_enable();
22394 + set_fs(old_fs);
22395 +
22396 + return ret ? -EFAULT : 0;
22397 +}
22398 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22399 index dd74e46..7d26398 100644
22400 --- a/arch/x86/mm/gup.c
22401 +++ b/arch/x86/mm/gup.c
22402 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22403 addr = start;
22404 len = (unsigned long) nr_pages << PAGE_SHIFT;
22405 end = start + len;
22406 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22407 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22408 (void __user *)start, len)))
22409 return 0;
22410
22411 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22412 index f4f29b1..5cac4fb 100644
22413 --- a/arch/x86/mm/highmem_32.c
22414 +++ b/arch/x86/mm/highmem_32.c
22415 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22416 idx = type + KM_TYPE_NR*smp_processor_id();
22417 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22418 BUG_ON(!pte_none(*(kmap_pte-idx)));
22419 +
22420 + pax_open_kernel();
22421 set_pte(kmap_pte-idx, mk_pte(page, prot));
22422 + pax_close_kernel();
22423 +
22424 arch_flush_lazy_mmu_mode();
22425
22426 return (void *)vaddr;
22427 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22428 index f581a18..29efd37 100644
22429 --- a/arch/x86/mm/hugetlbpage.c
22430 +++ b/arch/x86/mm/hugetlbpage.c
22431 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22432 struct hstate *h = hstate_file(file);
22433 struct mm_struct *mm = current->mm;
22434 struct vm_area_struct *vma;
22435 - unsigned long start_addr;
22436 + unsigned long start_addr, pax_task_size = TASK_SIZE;
22437 +
22438 +#ifdef CONFIG_PAX_SEGMEXEC
22439 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22440 + pax_task_size = SEGMEXEC_TASK_SIZE;
22441 +#endif
22442 +
22443 + pax_task_size -= PAGE_SIZE;
22444
22445 if (len > mm->cached_hole_size) {
22446 - start_addr = mm->free_area_cache;
22447 + start_addr = mm->free_area_cache;
22448 } else {
22449 - start_addr = TASK_UNMAPPED_BASE;
22450 - mm->cached_hole_size = 0;
22451 + start_addr = mm->mmap_base;
22452 + mm->cached_hole_size = 0;
22453 }
22454
22455 full_search:
22456 @@ -280,26 +287,27 @@ full_search:
22457
22458 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22459 /* At this point: (!vma || addr < vma->vm_end). */
22460 - if (TASK_SIZE - len < addr) {
22461 + if (pax_task_size - len < addr) {
22462 /*
22463 * Start a new search - just in case we missed
22464 * some holes.
22465 */
22466 - if (start_addr != TASK_UNMAPPED_BASE) {
22467 - start_addr = TASK_UNMAPPED_BASE;
22468 + if (start_addr != mm->mmap_base) {
22469 + start_addr = mm->mmap_base;
22470 mm->cached_hole_size = 0;
22471 goto full_search;
22472 }
22473 return -ENOMEM;
22474 }
22475 - if (!vma || addr + len <= vma->vm_start) {
22476 - mm->free_area_cache = addr + len;
22477 - return addr;
22478 - }
22479 + if (check_heap_stack_gap(vma, addr, len))
22480 + break;
22481 if (addr + mm->cached_hole_size < vma->vm_start)
22482 mm->cached_hole_size = vma->vm_start - addr;
22483 addr = ALIGN(vma->vm_end, huge_page_size(h));
22484 }
22485 +
22486 + mm->free_area_cache = addr + len;
22487 + return addr;
22488 }
22489
22490 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22491 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22492 {
22493 struct hstate *h = hstate_file(file);
22494 struct mm_struct *mm = current->mm;
22495 - struct vm_area_struct *vma, *prev_vma;
22496 - unsigned long base = mm->mmap_base, addr = addr0;
22497 + struct vm_area_struct *vma;
22498 + unsigned long base = mm->mmap_base, addr;
22499 unsigned long largest_hole = mm->cached_hole_size;
22500 - int first_time = 1;
22501
22502 /* don't allow allocations above current base */
22503 if (mm->free_area_cache > base)
22504 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22505 largest_hole = 0;
22506 mm->free_area_cache = base;
22507 }
22508 -try_again:
22509 +
22510 /* make sure it can fit in the remaining address space */
22511 if (mm->free_area_cache < len)
22512 goto fail;
22513
22514 /* either no address requested or can't fit in requested address hole */
22515 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
22516 + addr = (mm->free_area_cache - len);
22517 do {
22518 + addr &= huge_page_mask(h);
22519 + vma = find_vma(mm, addr);
22520 /*
22521 * Lookup failure means no vma is above this address,
22522 * i.e. return with success:
22523 - */
22524 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22525 - return addr;
22526 -
22527 - /*
22528 * new region fits between prev_vma->vm_end and
22529 * vma->vm_start, use it:
22530 */
22531 - if (addr + len <= vma->vm_start &&
22532 - (!prev_vma || (addr >= prev_vma->vm_end))) {
22533 + if (check_heap_stack_gap(vma, addr, len)) {
22534 /* remember the address as a hint for next time */
22535 - mm->cached_hole_size = largest_hole;
22536 - return (mm->free_area_cache = addr);
22537 - } else {
22538 - /* pull free_area_cache down to the first hole */
22539 - if (mm->free_area_cache == vma->vm_end) {
22540 - mm->free_area_cache = vma->vm_start;
22541 - mm->cached_hole_size = largest_hole;
22542 - }
22543 + mm->cached_hole_size = largest_hole;
22544 + return (mm->free_area_cache = addr);
22545 + }
22546 + /* pull free_area_cache down to the first hole */
22547 + if (mm->free_area_cache == vma->vm_end) {
22548 + mm->free_area_cache = vma->vm_start;
22549 + mm->cached_hole_size = largest_hole;
22550 }
22551
22552 /* remember the largest hole we saw so far */
22553 if (addr + largest_hole < vma->vm_start)
22554 - largest_hole = vma->vm_start - addr;
22555 + largest_hole = vma->vm_start - addr;
22556
22557 /* try just below the current vma->vm_start */
22558 - addr = (vma->vm_start - len) & huge_page_mask(h);
22559 - } while (len <= vma->vm_start);
22560 + addr = skip_heap_stack_gap(vma, len);
22561 + } while (!IS_ERR_VALUE(addr));
22562
22563 fail:
22564 /*
22565 - * if hint left us with no space for the requested
22566 - * mapping then try again:
22567 - */
22568 - if (first_time) {
22569 - mm->free_area_cache = base;
22570 - largest_hole = 0;
22571 - first_time = 0;
22572 - goto try_again;
22573 - }
22574 - /*
22575 * A failed mmap() very likely causes application failure,
22576 * so fall back to the bottom-up function here. This scenario
22577 * can happen with large stack limits and large mmap()
22578 * allocations.
22579 */
22580 - mm->free_area_cache = TASK_UNMAPPED_BASE;
22581 +
22582 +#ifdef CONFIG_PAX_SEGMEXEC
22583 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22584 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22585 + else
22586 +#endif
22587 +
22588 + mm->mmap_base = TASK_UNMAPPED_BASE;
22589 +
22590 +#ifdef CONFIG_PAX_RANDMMAP
22591 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22592 + mm->mmap_base += mm->delta_mmap;
22593 +#endif
22594 +
22595 + mm->free_area_cache = mm->mmap_base;
22596 mm->cached_hole_size = ~0UL;
22597 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22598 len, pgoff, flags);
22599 @@ -386,6 +392,7 @@ fail:
22600 /*
22601 * Restore the topdown base:
22602 */
22603 + mm->mmap_base = base;
22604 mm->free_area_cache = base;
22605 mm->cached_hole_size = ~0UL;
22606
22607 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22608 struct hstate *h = hstate_file(file);
22609 struct mm_struct *mm = current->mm;
22610 struct vm_area_struct *vma;
22611 + unsigned long pax_task_size = TASK_SIZE;
22612
22613 if (len & ~huge_page_mask(h))
22614 return -EINVAL;
22615 - if (len > TASK_SIZE)
22616 +
22617 +#ifdef CONFIG_PAX_SEGMEXEC
22618 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22619 + pax_task_size = SEGMEXEC_TASK_SIZE;
22620 +#endif
22621 +
22622 + pax_task_size -= PAGE_SIZE;
22623 +
22624 + if (len > pax_task_size)
22625 return -ENOMEM;
22626
22627 if (flags & MAP_FIXED) {
22628 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22629 if (addr) {
22630 addr = ALIGN(addr, huge_page_size(h));
22631 vma = find_vma(mm, addr);
22632 - if (TASK_SIZE - len >= addr &&
22633 - (!vma || addr + len <= vma->vm_start))
22634 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22635 return addr;
22636 }
22637 if (mm->get_unmapped_area == arch_get_unmapped_area)
22638 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22639 index 87488b9..399f416 100644
22640 --- a/arch/x86/mm/init.c
22641 +++ b/arch/x86/mm/init.c
22642 @@ -15,6 +15,7 @@
22643 #include <asm/tlbflush.h>
22644 #include <asm/tlb.h>
22645 #include <asm/proto.h>
22646 +#include <asm/desc.h>
22647
22648 unsigned long __initdata pgt_buf_start;
22649 unsigned long __meminitdata pgt_buf_end;
22650 @@ -31,7 +32,7 @@ int direct_gbpages
22651 static void __init find_early_table_space(unsigned long end, int use_pse,
22652 int use_gbpages)
22653 {
22654 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22655 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22656 phys_addr_t base;
22657
22658 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22659 @@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22660 */
22661 int devmem_is_allowed(unsigned long pagenr)
22662 {
22663 +#ifdef CONFIG_GRKERNSEC_KMEM
22664 + /* allow BDA */
22665 + if (!pagenr)
22666 + return 1;
22667 + /* allow EBDA */
22668 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22669 + return 1;
22670 +#else
22671 + if (!pagenr)
22672 + return 1;
22673 +#ifdef CONFIG_VM86
22674 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22675 + return 1;
22676 +#endif
22677 +#endif
22678 +
22679 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22680 + return 1;
22681 +#ifdef CONFIG_GRKERNSEC_KMEM
22682 + /* throw out everything else below 1MB */
22683 if (pagenr <= 256)
22684 - return 1;
22685 + return 0;
22686 +#endif
22687 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22688 return 0;
22689 if (!page_is_ram(pagenr))
22690 @@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22691
22692 void free_initmem(void)
22693 {
22694 +
22695 +#ifdef CONFIG_PAX_KERNEXEC
22696 +#ifdef CONFIG_X86_32
22697 + /* PaX: limit KERNEL_CS to actual size */
22698 + unsigned long addr, limit;
22699 + struct desc_struct d;
22700 + int cpu;
22701 +
22702 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22703 + limit = (limit - 1UL) >> PAGE_SHIFT;
22704 +
22705 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22706 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
22707 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22708 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22709 + }
22710 +
22711 + /* PaX: make KERNEL_CS read-only */
22712 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22713 + if (!paravirt_enabled())
22714 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22715 +/*
22716 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22717 + pgd = pgd_offset_k(addr);
22718 + pud = pud_offset(pgd, addr);
22719 + pmd = pmd_offset(pud, addr);
22720 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22721 + }
22722 +*/
22723 +#ifdef CONFIG_X86_PAE
22724 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22725 +/*
22726 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22727 + pgd = pgd_offset_k(addr);
22728 + pud = pud_offset(pgd, addr);
22729 + pmd = pmd_offset(pud, addr);
22730 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22731 + }
22732 +*/
22733 +#endif
22734 +
22735 +#ifdef CONFIG_MODULES
22736 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22737 +#endif
22738 +
22739 +#else
22740 + pgd_t *pgd;
22741 + pud_t *pud;
22742 + pmd_t *pmd;
22743 + unsigned long addr, end;
22744 +
22745 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22746 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22747 + pgd = pgd_offset_k(addr);
22748 + pud = pud_offset(pgd, addr);
22749 + pmd = pmd_offset(pud, addr);
22750 + if (!pmd_present(*pmd))
22751 + continue;
22752 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22753 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22754 + else
22755 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22756 + }
22757 +
22758 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22759 + end = addr + KERNEL_IMAGE_SIZE;
22760 + for (; addr < end; addr += PMD_SIZE) {
22761 + pgd = pgd_offset_k(addr);
22762 + pud = pud_offset(pgd, addr);
22763 + pmd = pmd_offset(pud, addr);
22764 + if (!pmd_present(*pmd))
22765 + continue;
22766 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22767 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22768 + }
22769 +#endif
22770 +
22771 + flush_tlb_all();
22772 +#endif
22773 +
22774 free_init_pages("unused kernel memory",
22775 (unsigned long)(&__init_begin),
22776 (unsigned long)(&__init_end));
22777 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22778 index 29f7c6d..b46b35b 100644
22779 --- a/arch/x86/mm/init_32.c
22780 +++ b/arch/x86/mm/init_32.c
22781 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22782 }
22783
22784 /*
22785 - * Creates a middle page table and puts a pointer to it in the
22786 - * given global directory entry. This only returns the gd entry
22787 - * in non-PAE compilation mode, since the middle layer is folded.
22788 - */
22789 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
22790 -{
22791 - pud_t *pud;
22792 - pmd_t *pmd_table;
22793 -
22794 -#ifdef CONFIG_X86_PAE
22795 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22796 - if (after_bootmem)
22797 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22798 - else
22799 - pmd_table = (pmd_t *)alloc_low_page();
22800 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22801 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22802 - pud = pud_offset(pgd, 0);
22803 - BUG_ON(pmd_table != pmd_offset(pud, 0));
22804 -
22805 - return pmd_table;
22806 - }
22807 -#endif
22808 - pud = pud_offset(pgd, 0);
22809 - pmd_table = pmd_offset(pud, 0);
22810 -
22811 - return pmd_table;
22812 -}
22813 -
22814 -/*
22815 * Create a page table and place a pointer to it in a middle page
22816 * directory entry:
22817 */
22818 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22819 page_table = (pte_t *)alloc_low_page();
22820
22821 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22822 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22823 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22824 +#else
22825 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22826 +#endif
22827 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22828 }
22829
22830 return pte_offset_kernel(pmd, 0);
22831 }
22832
22833 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
22834 +{
22835 + pud_t *pud;
22836 + pmd_t *pmd_table;
22837 +
22838 + pud = pud_offset(pgd, 0);
22839 + pmd_table = pmd_offset(pud, 0);
22840 +
22841 + return pmd_table;
22842 +}
22843 +
22844 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22845 {
22846 int pgd_idx = pgd_index(vaddr);
22847 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22848 int pgd_idx, pmd_idx;
22849 unsigned long vaddr;
22850 pgd_t *pgd;
22851 + pud_t *pud;
22852 pmd_t *pmd;
22853 pte_t *pte = NULL;
22854
22855 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22856 pgd = pgd_base + pgd_idx;
22857
22858 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22859 - pmd = one_md_table_init(pgd);
22860 - pmd = pmd + pmd_index(vaddr);
22861 + pud = pud_offset(pgd, vaddr);
22862 + pmd = pmd_offset(pud, vaddr);
22863 +
22864 +#ifdef CONFIG_X86_PAE
22865 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22866 +#endif
22867 +
22868 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22869 pmd++, pmd_idx++) {
22870 pte = page_table_kmap_check(one_page_table_init(pmd),
22871 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22872 }
22873 }
22874
22875 -static inline int is_kernel_text(unsigned long addr)
22876 +static inline int is_kernel_text(unsigned long start, unsigned long end)
22877 {
22878 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22879 - return 1;
22880 - return 0;
22881 + if ((start > ktla_ktva((unsigned long)_etext) ||
22882 + end <= ktla_ktva((unsigned long)_stext)) &&
22883 + (start > ktla_ktva((unsigned long)_einittext) ||
22884 + end <= ktla_ktva((unsigned long)_sinittext)) &&
22885 +
22886 +#ifdef CONFIG_ACPI_SLEEP
22887 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22888 +#endif
22889 +
22890 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22891 + return 0;
22892 + return 1;
22893 }
22894
22895 /*
22896 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22897 unsigned long last_map_addr = end;
22898 unsigned long start_pfn, end_pfn;
22899 pgd_t *pgd_base = swapper_pg_dir;
22900 - int pgd_idx, pmd_idx, pte_ofs;
22901 + unsigned int pgd_idx, pmd_idx, pte_ofs;
22902 unsigned long pfn;
22903 pgd_t *pgd;
22904 + pud_t *pud;
22905 pmd_t *pmd;
22906 pte_t *pte;
22907 unsigned pages_2m, pages_4k;
22908 @@ -281,8 +282,13 @@ repeat:
22909 pfn = start_pfn;
22910 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22911 pgd = pgd_base + pgd_idx;
22912 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22913 - pmd = one_md_table_init(pgd);
22914 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22915 + pud = pud_offset(pgd, 0);
22916 + pmd = pmd_offset(pud, 0);
22917 +
22918 +#ifdef CONFIG_X86_PAE
22919 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22920 +#endif
22921
22922 if (pfn >= end_pfn)
22923 continue;
22924 @@ -294,14 +300,13 @@ repeat:
22925 #endif
22926 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22927 pmd++, pmd_idx++) {
22928 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22929 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22930
22931 /*
22932 * Map with big pages if possible, otherwise
22933 * create normal page tables:
22934 */
22935 if (use_pse) {
22936 - unsigned int addr2;
22937 pgprot_t prot = PAGE_KERNEL_LARGE;
22938 /*
22939 * first pass will use the same initial
22940 @@ -311,11 +316,7 @@ repeat:
22941 __pgprot(PTE_IDENT_ATTR |
22942 _PAGE_PSE);
22943
22944 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22945 - PAGE_OFFSET + PAGE_SIZE-1;
22946 -
22947 - if (is_kernel_text(addr) ||
22948 - is_kernel_text(addr2))
22949 + if (is_kernel_text(address, address + PMD_SIZE))
22950 prot = PAGE_KERNEL_LARGE_EXEC;
22951
22952 pages_2m++;
22953 @@ -332,7 +333,7 @@ repeat:
22954 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22955 pte += pte_ofs;
22956 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22957 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22958 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22959 pgprot_t prot = PAGE_KERNEL;
22960 /*
22961 * first pass will use the same initial
22962 @@ -340,7 +341,7 @@ repeat:
22963 */
22964 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22965
22966 - if (is_kernel_text(addr))
22967 + if (is_kernel_text(address, address + PAGE_SIZE))
22968 prot = PAGE_KERNEL_EXEC;
22969
22970 pages_4k++;
22971 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22972
22973 pud = pud_offset(pgd, va);
22974 pmd = pmd_offset(pud, va);
22975 - if (!pmd_present(*pmd))
22976 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
22977 break;
22978
22979 pte = pte_offset_kernel(pmd, va);
22980 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22981
22982 static void __init pagetable_init(void)
22983 {
22984 - pgd_t *pgd_base = swapper_pg_dir;
22985 -
22986 - permanent_kmaps_init(pgd_base);
22987 + permanent_kmaps_init(swapper_pg_dir);
22988 }
22989
22990 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22991 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22992 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22993
22994 /* user-defined highmem size */
22995 @@ -757,6 +756,12 @@ void __init mem_init(void)
22996
22997 pci_iommu_alloc();
22998
22999 +#ifdef CONFIG_PAX_PER_CPU_PGD
23000 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
23001 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
23002 + KERNEL_PGD_PTRS);
23003 +#endif
23004 +
23005 #ifdef CONFIG_FLATMEM
23006 BUG_ON(!mem_map);
23007 #endif
23008 @@ -774,7 +779,7 @@ void __init mem_init(void)
23009 set_highmem_pages_init();
23010
23011 codesize = (unsigned long) &_etext - (unsigned long) &_text;
23012 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
23013 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
23014 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
23015
23016 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
23017 @@ -815,10 +820,10 @@ void __init mem_init(void)
23018 ((unsigned long)&__init_end -
23019 (unsigned long)&__init_begin) >> 10,
23020
23021 - (unsigned long)&_etext, (unsigned long)&_edata,
23022 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
23023 + (unsigned long)&_sdata, (unsigned long)&_edata,
23024 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
23025
23026 - (unsigned long)&_text, (unsigned long)&_etext,
23027 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
23028 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
23029
23030 /*
23031 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
23032 if (!kernel_set_to_readonly)
23033 return;
23034
23035 + start = ktla_ktva(start);
23036 pr_debug("Set kernel text: %lx - %lx for read write\n",
23037 start, start+size);
23038
23039 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
23040 if (!kernel_set_to_readonly)
23041 return;
23042
23043 + start = ktla_ktva(start);
23044 pr_debug("Set kernel text: %lx - %lx for read only\n",
23045 start, start+size);
23046
23047 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
23048 unsigned long start = PFN_ALIGN(_text);
23049 unsigned long size = PFN_ALIGN(_etext) - start;
23050
23051 + start = ktla_ktva(start);
23052 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
23053 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
23054 size >> 10);
23055 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
23056 index bbaaa00..796fa65 100644
23057 --- a/arch/x86/mm/init_64.c
23058 +++ b/arch/x86/mm/init_64.c
23059 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
23060 * around without checking the pgd every time.
23061 */
23062
23063 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
23064 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
23065 EXPORT_SYMBOL_GPL(__supported_pte_mask);
23066
23067 int force_personality32;
23068 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
23069
23070 for (address = start; address <= end; address += PGDIR_SIZE) {
23071 const pgd_t *pgd_ref = pgd_offset_k(address);
23072 +
23073 +#ifdef CONFIG_PAX_PER_CPU_PGD
23074 + unsigned long cpu;
23075 +#else
23076 struct page *page;
23077 +#endif
23078
23079 if (pgd_none(*pgd_ref))
23080 continue;
23081
23082 spin_lock(&pgd_lock);
23083 +
23084 +#ifdef CONFIG_PAX_PER_CPU_PGD
23085 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23086 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
23087 +#else
23088 list_for_each_entry(page, &pgd_list, lru) {
23089 pgd_t *pgd;
23090 spinlock_t *pgt_lock;
23091 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
23092 /* the pgt_lock only for Xen */
23093 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23094 spin_lock(pgt_lock);
23095 +#endif
23096
23097 if (pgd_none(*pgd))
23098 set_pgd(pgd, *pgd_ref);
23099 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
23100 BUG_ON(pgd_page_vaddr(*pgd)
23101 != pgd_page_vaddr(*pgd_ref));
23102
23103 +#ifndef CONFIG_PAX_PER_CPU_PGD
23104 spin_unlock(pgt_lock);
23105 +#endif
23106 +
23107 }
23108 spin_unlock(&pgd_lock);
23109 }
23110 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
23111 pmd = fill_pmd(pud, vaddr);
23112 pte = fill_pte(pmd, vaddr);
23113
23114 + pax_open_kernel();
23115 set_pte(pte, new_pte);
23116 + pax_close_kernel();
23117
23118 /*
23119 * It's enough to flush this one mapping.
23120 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
23121 pgd = pgd_offset_k((unsigned long)__va(phys));
23122 if (pgd_none(*pgd)) {
23123 pud = (pud_t *) spp_getpage();
23124 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
23125 - _PAGE_USER));
23126 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
23127 }
23128 pud = pud_offset(pgd, (unsigned long)__va(phys));
23129 if (pud_none(*pud)) {
23130 pmd = (pmd_t *) spp_getpage();
23131 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
23132 - _PAGE_USER));
23133 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
23134 }
23135 pmd = pmd_offset(pud, phys);
23136 BUG_ON(!pmd_none(*pmd));
23137 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
23138 if (pfn >= pgt_buf_top)
23139 panic("alloc_low_page: ran out of memory");
23140
23141 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
23142 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
23143 clear_page(adr);
23144 *phys = pfn * PAGE_SIZE;
23145 return adr;
23146 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
23147
23148 phys = __pa(virt);
23149 left = phys & (PAGE_SIZE - 1);
23150 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
23151 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
23152 adr = (void *)(((unsigned long)adr) | left);
23153
23154 return adr;
23155 @@ -693,6 +707,12 @@ void __init mem_init(void)
23156
23157 pci_iommu_alloc();
23158
23159 +#ifdef CONFIG_PAX_PER_CPU_PGD
23160 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
23161 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
23162 + KERNEL_PGD_PTRS);
23163 +#endif
23164 +
23165 /* clear_bss() already clear the empty_zero_page */
23166
23167 reservedpages = 0;
23168 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
23169 static struct vm_area_struct gate_vma = {
23170 .vm_start = VSYSCALL_START,
23171 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
23172 - .vm_page_prot = PAGE_READONLY_EXEC,
23173 - .vm_flags = VM_READ | VM_EXEC
23174 + .vm_page_prot = PAGE_READONLY,
23175 + .vm_flags = VM_READ
23176 };
23177
23178 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
23179 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
23180
23181 const char *arch_vma_name(struct vm_area_struct *vma)
23182 {
23183 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23184 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23185 return "[vdso]";
23186 if (vma == &gate_vma)
23187 return "[vsyscall]";
23188 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
23189 index 7b179b4..6bd1777 100644
23190 --- a/arch/x86/mm/iomap_32.c
23191 +++ b/arch/x86/mm/iomap_32.c
23192 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
23193 type = kmap_atomic_idx_push();
23194 idx = type + KM_TYPE_NR * smp_processor_id();
23195 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
23196 +
23197 + pax_open_kernel();
23198 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
23199 + pax_close_kernel();
23200 +
23201 arch_flush_lazy_mmu_mode();
23202
23203 return (void *)vaddr;
23204 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
23205 index be1ef57..55f0160 100644
23206 --- a/arch/x86/mm/ioremap.c
23207 +++ b/arch/x86/mm/ioremap.c
23208 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
23209 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
23210 int is_ram = page_is_ram(pfn);
23211
23212 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
23213 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
23214 return NULL;
23215 WARN_ON_ONCE(is_ram);
23216 }
23217 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
23218
23219 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
23220 if (page_is_ram(start >> PAGE_SHIFT))
23221 +#ifdef CONFIG_HIGHMEM
23222 + if ((start >> PAGE_SHIFT) < max_low_pfn)
23223 +#endif
23224 return __va(phys);
23225
23226 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
23227 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
23228 early_param("early_ioremap_debug", early_ioremap_debug_setup);
23229
23230 static __initdata int after_paging_init;
23231 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
23232 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
23233
23234 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
23235 {
23236 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
23237 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
23238
23239 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
23240 - memset(bm_pte, 0, sizeof(bm_pte));
23241 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
23242 + pmd_populate_user(&init_mm, pmd, bm_pte);
23243
23244 /*
23245 * The boot-ioremap range spans multiple pmds, for which
23246 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
23247 index d87dd6d..bf3fa66 100644
23248 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
23249 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
23250 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
23251 * memory (e.g. tracked pages)? For now, we need this to avoid
23252 * invoking kmemcheck for PnP BIOS calls.
23253 */
23254 - if (regs->flags & X86_VM_MASK)
23255 + if (v8086_mode(regs))
23256 return false;
23257 - if (regs->cs != __KERNEL_CS)
23258 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
23259 return false;
23260
23261 pte = kmemcheck_pte_lookup(address);
23262 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
23263 index 845df68..1d8d29f 100644
23264 --- a/arch/x86/mm/mmap.c
23265 +++ b/arch/x86/mm/mmap.c
23266 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
23267 * Leave an at least ~128 MB hole with possible stack randomization.
23268 */
23269 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
23270 -#define MAX_GAP (TASK_SIZE/6*5)
23271 +#define MAX_GAP (pax_task_size/6*5)
23272
23273 static int mmap_is_legacy(void)
23274 {
23275 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
23276 return rnd << PAGE_SHIFT;
23277 }
23278
23279 -static unsigned long mmap_base(void)
23280 +static unsigned long mmap_base(struct mm_struct *mm)
23281 {
23282 unsigned long gap = rlimit(RLIMIT_STACK);
23283 + unsigned long pax_task_size = TASK_SIZE;
23284 +
23285 +#ifdef CONFIG_PAX_SEGMEXEC
23286 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23287 + pax_task_size = SEGMEXEC_TASK_SIZE;
23288 +#endif
23289
23290 if (gap < MIN_GAP)
23291 gap = MIN_GAP;
23292 else if (gap > MAX_GAP)
23293 gap = MAX_GAP;
23294
23295 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
23296 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
23297 }
23298
23299 /*
23300 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
23301 * does, but not when emulating X86_32
23302 */
23303 -static unsigned long mmap_legacy_base(void)
23304 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
23305 {
23306 - if (mmap_is_ia32())
23307 + if (mmap_is_ia32()) {
23308 +
23309 +#ifdef CONFIG_PAX_SEGMEXEC
23310 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23311 + return SEGMEXEC_TASK_UNMAPPED_BASE;
23312 + else
23313 +#endif
23314 +
23315 return TASK_UNMAPPED_BASE;
23316 - else
23317 + } else
23318 return TASK_UNMAPPED_BASE + mmap_rnd();
23319 }
23320
23321 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
23322 void arch_pick_mmap_layout(struct mm_struct *mm)
23323 {
23324 if (mmap_is_legacy()) {
23325 - mm->mmap_base = mmap_legacy_base();
23326 + mm->mmap_base = mmap_legacy_base(mm);
23327 +
23328 +#ifdef CONFIG_PAX_RANDMMAP
23329 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23330 + mm->mmap_base += mm->delta_mmap;
23331 +#endif
23332 +
23333 mm->get_unmapped_area = arch_get_unmapped_area;
23334 mm->unmap_area = arch_unmap_area;
23335 } else {
23336 - mm->mmap_base = mmap_base();
23337 + mm->mmap_base = mmap_base(mm);
23338 +
23339 +#ifdef CONFIG_PAX_RANDMMAP
23340 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23341 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23342 +#endif
23343 +
23344 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23345 mm->unmap_area = arch_unmap_area_topdown;
23346 }
23347 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23348 index de54b9b..799051e 100644
23349 --- a/arch/x86/mm/mmio-mod.c
23350 +++ b/arch/x86/mm/mmio-mod.c
23351 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
23352 break;
23353 default:
23354 {
23355 - unsigned char *ip = (unsigned char *)instptr;
23356 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23357 my_trace->opcode = MMIO_UNKNOWN_OP;
23358 my_trace->width = 0;
23359 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23360 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
23361 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23362 void __iomem *addr)
23363 {
23364 - static atomic_t next_id;
23365 + static atomic_unchecked_t next_id;
23366 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23367 /* These are page-unaligned. */
23368 struct mmiotrace_map map = {
23369 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23370 .private = trace
23371 },
23372 .phys = offset,
23373 - .id = atomic_inc_return(&next_id)
23374 + .id = atomic_inc_return_unchecked(&next_id)
23375 };
23376 map.map_id = trace->id;
23377
23378 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23379 index b008656..773eac2 100644
23380 --- a/arch/x86/mm/pageattr-test.c
23381 +++ b/arch/x86/mm/pageattr-test.c
23382 @@ -36,7 +36,7 @@ enum {
23383
23384 static int pte_testbit(pte_t pte)
23385 {
23386 - return pte_flags(pte) & _PAGE_UNUSED1;
23387 + return pte_flags(pte) & _PAGE_CPA_TEST;
23388 }
23389
23390 struct split_state {
23391 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23392 index f9e5267..77b1a40 100644
23393 --- a/arch/x86/mm/pageattr.c
23394 +++ b/arch/x86/mm/pageattr.c
23395 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23396 */
23397 #ifdef CONFIG_PCI_BIOS
23398 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23399 - pgprot_val(forbidden) |= _PAGE_NX;
23400 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23401 #endif
23402
23403 /*
23404 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23405 * Does not cover __inittext since that is gone later on. On
23406 * 64bit we do not enforce !NX on the low mapping
23407 */
23408 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
23409 - pgprot_val(forbidden) |= _PAGE_NX;
23410 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23411 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23412
23413 +#ifdef CONFIG_DEBUG_RODATA
23414 /*
23415 * The .rodata section needs to be read-only. Using the pfn
23416 * catches all aliases.
23417 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23418 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23419 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23420 pgprot_val(forbidden) |= _PAGE_RW;
23421 +#endif
23422
23423 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23424 /*
23425 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23426 }
23427 #endif
23428
23429 +#ifdef CONFIG_PAX_KERNEXEC
23430 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23431 + pgprot_val(forbidden) |= _PAGE_RW;
23432 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23433 + }
23434 +#endif
23435 +
23436 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23437
23438 return prot;
23439 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23440 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23441 {
23442 /* change init_mm */
23443 + pax_open_kernel();
23444 set_pte_atomic(kpte, pte);
23445 +
23446 #ifdef CONFIG_X86_32
23447 if (!SHARED_KERNEL_PMD) {
23448 +
23449 +#ifdef CONFIG_PAX_PER_CPU_PGD
23450 + unsigned long cpu;
23451 +#else
23452 struct page *page;
23453 +#endif
23454
23455 +#ifdef CONFIG_PAX_PER_CPU_PGD
23456 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23457 + pgd_t *pgd = get_cpu_pgd(cpu);
23458 +#else
23459 list_for_each_entry(page, &pgd_list, lru) {
23460 - pgd_t *pgd;
23461 + pgd_t *pgd = (pgd_t *)page_address(page);
23462 +#endif
23463 +
23464 pud_t *pud;
23465 pmd_t *pmd;
23466
23467 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
23468 + pgd += pgd_index(address);
23469 pud = pud_offset(pgd, address);
23470 pmd = pmd_offset(pud, address);
23471 set_pte_atomic((pte_t *)pmd, pte);
23472 }
23473 }
23474 #endif
23475 + pax_close_kernel();
23476 }
23477
23478 static int
23479 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23480 index f6ff57b..481690f 100644
23481 --- a/arch/x86/mm/pat.c
23482 +++ b/arch/x86/mm/pat.c
23483 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23484
23485 if (!entry) {
23486 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23487 - current->comm, current->pid, start, end);
23488 + current->comm, task_pid_nr(current), start, end);
23489 return -EINVAL;
23490 }
23491
23492 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23493 while (cursor < to) {
23494 if (!devmem_is_allowed(pfn)) {
23495 printk(KERN_INFO
23496 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23497 - current->comm, from, to);
23498 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23499 + current->comm, from, to, cursor);
23500 return 0;
23501 }
23502 cursor += PAGE_SIZE;
23503 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23504 printk(KERN_INFO
23505 "%s:%d ioremap_change_attr failed %s "
23506 "for %Lx-%Lx\n",
23507 - current->comm, current->pid,
23508 + current->comm, task_pid_nr(current),
23509 cattr_name(flags),
23510 base, (unsigned long long)(base + size));
23511 return -EINVAL;
23512 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23513 if (want_flags != flags) {
23514 printk(KERN_WARNING
23515 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23516 - current->comm, current->pid,
23517 + current->comm, task_pid_nr(current),
23518 cattr_name(want_flags),
23519 (unsigned long long)paddr,
23520 (unsigned long long)(paddr + size),
23521 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23522 free_memtype(paddr, paddr + size);
23523 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23524 " for %Lx-%Lx, got %s\n",
23525 - current->comm, current->pid,
23526 + current->comm, task_pid_nr(current),
23527 cattr_name(want_flags),
23528 (unsigned long long)paddr,
23529 (unsigned long long)(paddr + size),
23530 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23531 index 9f0614d..92ae64a 100644
23532 --- a/arch/x86/mm/pf_in.c
23533 +++ b/arch/x86/mm/pf_in.c
23534 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23535 int i;
23536 enum reason_type rv = OTHERS;
23537
23538 - p = (unsigned char *)ins_addr;
23539 + p = (unsigned char *)ktla_ktva(ins_addr);
23540 p += skip_prefix(p, &prf);
23541 p += get_opcode(p, &opcode);
23542
23543 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23544 struct prefix_bits prf;
23545 int i;
23546
23547 - p = (unsigned char *)ins_addr;
23548 + p = (unsigned char *)ktla_ktva(ins_addr);
23549 p += skip_prefix(p, &prf);
23550 p += get_opcode(p, &opcode);
23551
23552 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23553 struct prefix_bits prf;
23554 int i;
23555
23556 - p = (unsigned char *)ins_addr;
23557 + p = (unsigned char *)ktla_ktva(ins_addr);
23558 p += skip_prefix(p, &prf);
23559 p += get_opcode(p, &opcode);
23560
23561 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23562 struct prefix_bits prf;
23563 int i;
23564
23565 - p = (unsigned char *)ins_addr;
23566 + p = (unsigned char *)ktla_ktva(ins_addr);
23567 p += skip_prefix(p, &prf);
23568 p += get_opcode(p, &opcode);
23569 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23570 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23571 struct prefix_bits prf;
23572 int i;
23573
23574 - p = (unsigned char *)ins_addr;
23575 + p = (unsigned char *)ktla_ktva(ins_addr);
23576 p += skip_prefix(p, &prf);
23577 p += get_opcode(p, &opcode);
23578 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23579 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23580 index 8573b83..c3b1a30 100644
23581 --- a/arch/x86/mm/pgtable.c
23582 +++ b/arch/x86/mm/pgtable.c
23583 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23584 list_del(&page->lru);
23585 }
23586
23587 -#define UNSHARED_PTRS_PER_PGD \
23588 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23589 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23590 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23591
23592 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23593 +{
23594 + while (count--)
23595 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23596 +}
23597 +#endif
23598
23599 +#ifdef CONFIG_PAX_PER_CPU_PGD
23600 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23601 +{
23602 + while (count--)
23603 +
23604 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23605 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23606 +#else
23607 + *dst++ = *src++;
23608 +#endif
23609 +
23610 +}
23611 +#endif
23612 +
23613 +#ifdef CONFIG_X86_64
23614 +#define pxd_t pud_t
23615 +#define pyd_t pgd_t
23616 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23617 +#define pxd_free(mm, pud) pud_free((mm), (pud))
23618 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23619 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
23620 +#define PYD_SIZE PGDIR_SIZE
23621 +#else
23622 +#define pxd_t pmd_t
23623 +#define pyd_t pud_t
23624 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23625 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
23626 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23627 +#define pyd_offset(mm, address) pud_offset((mm), (address))
23628 +#define PYD_SIZE PUD_SIZE
23629 +#endif
23630 +
23631 +#ifdef CONFIG_PAX_PER_CPU_PGD
23632 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23633 +static inline void pgd_dtor(pgd_t *pgd) {}
23634 +#else
23635 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23636 {
23637 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23638 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23639 pgd_list_del(pgd);
23640 spin_unlock(&pgd_lock);
23641 }
23642 +#endif
23643
23644 /*
23645 * List of all pgd's needed for non-PAE so it can invalidate entries
23646 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23647 * -- wli
23648 */
23649
23650 -#ifdef CONFIG_X86_PAE
23651 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23652 /*
23653 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23654 * updating the top-level pagetable entries to guarantee the
23655 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23656 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23657 * and initialize the kernel pmds here.
23658 */
23659 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23660 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23661
23662 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23663 {
23664 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23665 */
23666 flush_tlb_mm(mm);
23667 }
23668 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23669 +#define PREALLOCATED_PXDS USER_PGD_PTRS
23670 #else /* !CONFIG_X86_PAE */
23671
23672 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23673 -#define PREALLOCATED_PMDS 0
23674 +#define PREALLOCATED_PXDS 0
23675
23676 #endif /* CONFIG_X86_PAE */
23677
23678 -static void free_pmds(pmd_t *pmds[])
23679 +static void free_pxds(pxd_t *pxds[])
23680 {
23681 int i;
23682
23683 - for(i = 0; i < PREALLOCATED_PMDS; i++)
23684 - if (pmds[i])
23685 - free_page((unsigned long)pmds[i]);
23686 + for(i = 0; i < PREALLOCATED_PXDS; i++)
23687 + if (pxds[i])
23688 + free_page((unsigned long)pxds[i]);
23689 }
23690
23691 -static int preallocate_pmds(pmd_t *pmds[])
23692 +static int preallocate_pxds(pxd_t *pxds[])
23693 {
23694 int i;
23695 bool failed = false;
23696
23697 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23698 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23699 - if (pmd == NULL)
23700 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23701 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23702 + if (pxd == NULL)
23703 failed = true;
23704 - pmds[i] = pmd;
23705 + pxds[i] = pxd;
23706 }
23707
23708 if (failed) {
23709 - free_pmds(pmds);
23710 + free_pxds(pxds);
23711 return -ENOMEM;
23712 }
23713
23714 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23715 * preallocate which never got a corresponding vma will need to be
23716 * freed manually.
23717 */
23718 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23719 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23720 {
23721 int i;
23722
23723 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23724 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23725 pgd_t pgd = pgdp[i];
23726
23727 if (pgd_val(pgd) != 0) {
23728 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23729 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23730
23731 - pgdp[i] = native_make_pgd(0);
23732 + set_pgd(pgdp + i, native_make_pgd(0));
23733
23734 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23735 - pmd_free(mm, pmd);
23736 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23737 + pxd_free(mm, pxd);
23738 }
23739 }
23740 }
23741
23742 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23743 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23744 {
23745 - pud_t *pud;
23746 + pyd_t *pyd;
23747 unsigned long addr;
23748 int i;
23749
23750 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23751 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23752 return;
23753
23754 - pud = pud_offset(pgd, 0);
23755 +#ifdef CONFIG_X86_64
23756 + pyd = pyd_offset(mm, 0L);
23757 +#else
23758 + pyd = pyd_offset(pgd, 0L);
23759 +#endif
23760
23761 - for (addr = i = 0; i < PREALLOCATED_PMDS;
23762 - i++, pud++, addr += PUD_SIZE) {
23763 - pmd_t *pmd = pmds[i];
23764 + for (addr = i = 0; i < PREALLOCATED_PXDS;
23765 + i++, pyd++, addr += PYD_SIZE) {
23766 + pxd_t *pxd = pxds[i];
23767
23768 if (i >= KERNEL_PGD_BOUNDARY)
23769 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23770 - sizeof(pmd_t) * PTRS_PER_PMD);
23771 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23772 + sizeof(pxd_t) * PTRS_PER_PMD);
23773
23774 - pud_populate(mm, pud, pmd);
23775 + pyd_populate(mm, pyd, pxd);
23776 }
23777 }
23778
23779 pgd_t *pgd_alloc(struct mm_struct *mm)
23780 {
23781 pgd_t *pgd;
23782 - pmd_t *pmds[PREALLOCATED_PMDS];
23783 + pxd_t *pxds[PREALLOCATED_PXDS];
23784
23785 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23786
23787 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23788
23789 mm->pgd = pgd;
23790
23791 - if (preallocate_pmds(pmds) != 0)
23792 + if (preallocate_pxds(pxds) != 0)
23793 goto out_free_pgd;
23794
23795 if (paravirt_pgd_alloc(mm) != 0)
23796 - goto out_free_pmds;
23797 + goto out_free_pxds;
23798
23799 /*
23800 * Make sure that pre-populating the pmds is atomic with
23801 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23802 spin_lock(&pgd_lock);
23803
23804 pgd_ctor(mm, pgd);
23805 - pgd_prepopulate_pmd(mm, pgd, pmds);
23806 + pgd_prepopulate_pxd(mm, pgd, pxds);
23807
23808 spin_unlock(&pgd_lock);
23809
23810 return pgd;
23811
23812 -out_free_pmds:
23813 - free_pmds(pmds);
23814 +out_free_pxds:
23815 + free_pxds(pxds);
23816 out_free_pgd:
23817 free_page((unsigned long)pgd);
23818 out:
23819 @@ -295,7 +344,7 @@ out:
23820
23821 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23822 {
23823 - pgd_mop_up_pmds(mm, pgd);
23824 + pgd_mop_up_pxds(mm, pgd);
23825 pgd_dtor(pgd);
23826 paravirt_pgd_free(mm, pgd);
23827 free_page((unsigned long)pgd);
23828 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23829 index cac7184..09a39fa 100644
23830 --- a/arch/x86/mm/pgtable_32.c
23831 +++ b/arch/x86/mm/pgtable_32.c
23832 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23833 return;
23834 }
23835 pte = pte_offset_kernel(pmd, vaddr);
23836 +
23837 + pax_open_kernel();
23838 if (pte_val(pteval))
23839 set_pte_at(&init_mm, vaddr, pte, pteval);
23840 else
23841 pte_clear(&init_mm, vaddr, pte);
23842 + pax_close_kernel();
23843
23844 /*
23845 * It's enough to flush this one mapping.
23846 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23847 index 410531d..0f16030 100644
23848 --- a/arch/x86/mm/setup_nx.c
23849 +++ b/arch/x86/mm/setup_nx.c
23850 @@ -5,8 +5,10 @@
23851 #include <asm/pgtable.h>
23852 #include <asm/proto.h>
23853
23854 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23855 static int disable_nx __cpuinitdata;
23856
23857 +#ifndef CONFIG_PAX_PAGEEXEC
23858 /*
23859 * noexec = on|off
23860 *
23861 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23862 return 0;
23863 }
23864 early_param("noexec", noexec_setup);
23865 +#endif
23866 +
23867 +#endif
23868
23869 void __cpuinit x86_configure_nx(void)
23870 {
23871 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23872 if (cpu_has_nx && !disable_nx)
23873 __supported_pte_mask |= _PAGE_NX;
23874 else
23875 +#endif
23876 __supported_pte_mask &= ~_PAGE_NX;
23877 }
23878
23879 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23880 index d6c0418..06a0ad5 100644
23881 --- a/arch/x86/mm/tlb.c
23882 +++ b/arch/x86/mm/tlb.c
23883 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
23884 BUG();
23885 cpumask_clear_cpu(cpu,
23886 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23887 +
23888 +#ifndef CONFIG_PAX_PER_CPU_PGD
23889 load_cr3(swapper_pg_dir);
23890 +#endif
23891 +
23892 }
23893 EXPORT_SYMBOL_GPL(leave_mm);
23894
23895 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23896 index 6687022..ceabcfa 100644
23897 --- a/arch/x86/net/bpf_jit.S
23898 +++ b/arch/x86/net/bpf_jit.S
23899 @@ -9,6 +9,7 @@
23900 */
23901 #include <linux/linkage.h>
23902 #include <asm/dwarf2.h>
23903 +#include <asm/alternative-asm.h>
23904
23905 /*
23906 * Calling convention :
23907 @@ -35,6 +36,7 @@ sk_load_word:
23908 jle bpf_slow_path_word
23909 mov (SKBDATA,%rsi),%eax
23910 bswap %eax /* ntohl() */
23911 + pax_force_retaddr
23912 ret
23913
23914
23915 @@ -53,6 +55,7 @@ sk_load_half:
23916 jle bpf_slow_path_half
23917 movzwl (SKBDATA,%rsi),%eax
23918 rol $8,%ax # ntohs()
23919 + pax_force_retaddr
23920 ret
23921
23922 sk_load_byte_ind:
23923 @@ -66,6 +69,7 @@ sk_load_byte:
23924 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23925 jle bpf_slow_path_byte
23926 movzbl (SKBDATA,%rsi),%eax
23927 + pax_force_retaddr
23928 ret
23929
23930 /**
23931 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23932 movzbl (SKBDATA,%rsi),%ebx
23933 and $15,%bl
23934 shl $2,%bl
23935 + pax_force_retaddr
23936 ret
23937 CFI_ENDPROC
23938 ENDPROC(sk_load_byte_msh)
23939 @@ -91,6 +96,7 @@ bpf_error:
23940 xor %eax,%eax
23941 mov -8(%rbp),%rbx
23942 leaveq
23943 + pax_force_retaddr
23944 ret
23945
23946 /* rsi contains offset and can be scratched */
23947 @@ -113,6 +119,7 @@ bpf_slow_path_word:
23948 js bpf_error
23949 mov -12(%rbp),%eax
23950 bswap %eax
23951 + pax_force_retaddr
23952 ret
23953
23954 bpf_slow_path_half:
23955 @@ -121,12 +128,14 @@ bpf_slow_path_half:
23956 mov -12(%rbp),%ax
23957 rol $8,%ax
23958 movzwl %ax,%eax
23959 + pax_force_retaddr
23960 ret
23961
23962 bpf_slow_path_byte:
23963 bpf_slow_path_common(1)
23964 js bpf_error
23965 movzbl -12(%rbp),%eax
23966 + pax_force_retaddr
23967 ret
23968
23969 bpf_slow_path_byte_msh:
23970 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23971 and $15,%al
23972 shl $2,%al
23973 xchg %eax,%ebx
23974 + pax_force_retaddr
23975 ret
23976 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23977 index 7c1b765..8c072c6 100644
23978 --- a/arch/x86/net/bpf_jit_comp.c
23979 +++ b/arch/x86/net/bpf_jit_comp.c
23980 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23981 set_fs(old_fs);
23982 }
23983
23984 +struct bpf_jit_work {
23985 + struct work_struct work;
23986 + void *image;
23987 +};
23988
23989 void bpf_jit_compile(struct sk_filter *fp)
23990 {
23991 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23992 if (addrs == NULL)
23993 return;
23994
23995 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23996 + if (!fp->work)
23997 + goto out;
23998 +
23999 /* Before first pass, make a rough estimation of addrs[]
24000 * each bpf instruction is translated to less than 64 bytes
24001 */
24002 @@ -476,7 +484,7 @@ void bpf_jit_compile(struct sk_filter *fp)
24003 func = sk_load_word;
24004 common_load: seen |= SEEN_DATAREF;
24005 if ((int)K < 0)
24006 - goto out;
24007 + goto error;
24008 t_offset = func - (image + addrs[i]);
24009 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
24010 EMIT1_off32(0xe8, t_offset); /* call */
24011 @@ -586,17 +594,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
24012 break;
24013 default:
24014 /* hmm, too complex filter, give up with jit compiler */
24015 - goto out;
24016 + goto error;
24017 }
24018 ilen = prog - temp;
24019 if (image) {
24020 if (unlikely(proglen + ilen > oldproglen)) {
24021 pr_err("bpb_jit_compile fatal error\n");
24022 - kfree(addrs);
24023 - module_free(NULL, image);
24024 - return;
24025 + module_free_exec(NULL, image);
24026 + goto error;
24027 }
24028 + pax_open_kernel();
24029 memcpy(image + proglen, temp, ilen);
24030 + pax_close_kernel();
24031 }
24032 proglen += ilen;
24033 addrs[i] = proglen;
24034 @@ -617,11 +626,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
24035 break;
24036 }
24037 if (proglen == oldproglen) {
24038 - image = module_alloc(max_t(unsigned int,
24039 - proglen,
24040 - sizeof(struct work_struct)));
24041 + image = module_alloc_exec(proglen);
24042 if (!image)
24043 - goto out;
24044 + goto error;
24045 }
24046 oldproglen = proglen;
24047 }
24048 @@ -637,7 +644,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
24049 bpf_flush_icache(image, image + proglen);
24050
24051 fp->bpf_func = (void *)image;
24052 - }
24053 + } else
24054 +error:
24055 + kfree(fp->work);
24056 +
24057 out:
24058 kfree(addrs);
24059 return;
24060 @@ -645,18 +655,20 @@ out:
24061
24062 static void jit_free_defer(struct work_struct *arg)
24063 {
24064 - module_free(NULL, arg);
24065 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
24066 + kfree(arg);
24067 }
24068
24069 /* run from softirq, we must use a work_struct to call
24070 - * module_free() from process context
24071 + * module_free_exec() from process context
24072 */
24073 void bpf_jit_free(struct sk_filter *fp)
24074 {
24075 if (fp->bpf_func != sk_run_filter) {
24076 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
24077 + struct work_struct *work = &fp->work->work;
24078
24079 INIT_WORK(work, jit_free_defer);
24080 + fp->work->image = fp->bpf_func;
24081 schedule_work(work);
24082 }
24083 }
24084 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
24085 index bff89df..377758a 100644
24086 --- a/arch/x86/oprofile/backtrace.c
24087 +++ b/arch/x86/oprofile/backtrace.c
24088 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
24089 struct stack_frame_ia32 *fp;
24090 unsigned long bytes;
24091
24092 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
24093 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
24094 if (bytes != sizeof(bufhead))
24095 return NULL;
24096
24097 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
24098 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
24099
24100 oprofile_add_trace(bufhead[0].return_address);
24101
24102 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
24103 struct stack_frame bufhead[2];
24104 unsigned long bytes;
24105
24106 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
24107 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
24108 if (bytes != sizeof(bufhead))
24109 return NULL;
24110
24111 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
24112 {
24113 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
24114
24115 - if (!user_mode_vm(regs)) {
24116 + if (!user_mode(regs)) {
24117 unsigned long stack = kernel_stack_pointer(regs);
24118 if (depth)
24119 dump_trace(NULL, regs, (unsigned long *)stack, 0,
24120 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
24121 index cb29191..036766d 100644
24122 --- a/arch/x86/pci/mrst.c
24123 +++ b/arch/x86/pci/mrst.c
24124 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
24125 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
24126 pci_mmcfg_late_init();
24127 pcibios_enable_irq = mrst_pci_irq_enable;
24128 - pci_root_ops = pci_mrst_ops;
24129 + pax_open_kernel();
24130 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
24131 + pax_close_kernel();
24132 /* Continue with standard init */
24133 return 1;
24134 }
24135 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
24136 index db0e9a5..0372c14 100644
24137 --- a/arch/x86/pci/pcbios.c
24138 +++ b/arch/x86/pci/pcbios.c
24139 @@ -79,50 +79,93 @@ union bios32 {
24140 static struct {
24141 unsigned long address;
24142 unsigned short segment;
24143 -} bios32_indirect = { 0, __KERNEL_CS };
24144 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
24145
24146 /*
24147 * Returns the entry point for the given service, NULL on error
24148 */
24149
24150 -static unsigned long bios32_service(unsigned long service)
24151 +static unsigned long __devinit bios32_service(unsigned long service)
24152 {
24153 unsigned char return_code; /* %al */
24154 unsigned long address; /* %ebx */
24155 unsigned long length; /* %ecx */
24156 unsigned long entry; /* %edx */
24157 unsigned long flags;
24158 + struct desc_struct d, *gdt;
24159
24160 local_irq_save(flags);
24161 - __asm__("lcall *(%%edi); cld"
24162 +
24163 + gdt = get_cpu_gdt_table(smp_processor_id());
24164 +
24165 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
24166 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
24167 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
24168 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
24169 +
24170 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
24171 : "=a" (return_code),
24172 "=b" (address),
24173 "=c" (length),
24174 "=d" (entry)
24175 : "0" (service),
24176 "1" (0),
24177 - "D" (&bios32_indirect));
24178 + "D" (&bios32_indirect),
24179 + "r"(__PCIBIOS_DS)
24180 + : "memory");
24181 +
24182 + pax_open_kernel();
24183 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
24184 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
24185 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
24186 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
24187 + pax_close_kernel();
24188 +
24189 local_irq_restore(flags);
24190
24191 switch (return_code) {
24192 - case 0:
24193 - return address + entry;
24194 - case 0x80: /* Not present */
24195 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
24196 - return 0;
24197 - default: /* Shouldn't happen */
24198 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
24199 - service, return_code);
24200 + case 0: {
24201 + int cpu;
24202 + unsigned char flags;
24203 +
24204 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
24205 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
24206 + printk(KERN_WARNING "bios32_service: not valid\n");
24207 return 0;
24208 + }
24209 + address = address + PAGE_OFFSET;
24210 + length += 16UL; /* some BIOSs underreport this... */
24211 + flags = 4;
24212 + if (length >= 64*1024*1024) {
24213 + length >>= PAGE_SHIFT;
24214 + flags |= 8;
24215 + }
24216 +
24217 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24218 + gdt = get_cpu_gdt_table(cpu);
24219 + pack_descriptor(&d, address, length, 0x9b, flags);
24220 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
24221 + pack_descriptor(&d, address, length, 0x93, flags);
24222 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
24223 + }
24224 + return entry;
24225 + }
24226 + case 0x80: /* Not present */
24227 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
24228 + return 0;
24229 + default: /* Shouldn't happen */
24230 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
24231 + service, return_code);
24232 + return 0;
24233 }
24234 }
24235
24236 static struct {
24237 unsigned long address;
24238 unsigned short segment;
24239 -} pci_indirect = { 0, __KERNEL_CS };
24240 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
24241
24242 -static int pci_bios_present;
24243 +static int pci_bios_present __read_only;
24244
24245 static int __devinit check_pcibios(void)
24246 {
24247 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
24248 unsigned long flags, pcibios_entry;
24249
24250 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
24251 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
24252 + pci_indirect.address = pcibios_entry;
24253
24254 local_irq_save(flags);
24255 - __asm__(
24256 - "lcall *(%%edi); cld\n\t"
24257 + __asm__("movw %w6, %%ds\n\t"
24258 + "lcall *%%ss:(%%edi); cld\n\t"
24259 + "push %%ss\n\t"
24260 + "pop %%ds\n\t"
24261 "jc 1f\n\t"
24262 "xor %%ah, %%ah\n"
24263 "1:"
24264 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
24265 "=b" (ebx),
24266 "=c" (ecx)
24267 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
24268 - "D" (&pci_indirect)
24269 + "D" (&pci_indirect),
24270 + "r" (__PCIBIOS_DS)
24271 : "memory");
24272 local_irq_restore(flags);
24273
24274 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24275
24276 switch (len) {
24277 case 1:
24278 - __asm__("lcall *(%%esi); cld\n\t"
24279 + __asm__("movw %w6, %%ds\n\t"
24280 + "lcall *%%ss:(%%esi); cld\n\t"
24281 + "push %%ss\n\t"
24282 + "pop %%ds\n\t"
24283 "jc 1f\n\t"
24284 "xor %%ah, %%ah\n"
24285 "1:"
24286 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24287 : "1" (PCIBIOS_READ_CONFIG_BYTE),
24288 "b" (bx),
24289 "D" ((long)reg),
24290 - "S" (&pci_indirect));
24291 + "S" (&pci_indirect),
24292 + "r" (__PCIBIOS_DS));
24293 /*
24294 * Zero-extend the result beyond 8 bits, do not trust the
24295 * BIOS having done it:
24296 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24297 *value &= 0xff;
24298 break;
24299 case 2:
24300 - __asm__("lcall *(%%esi); cld\n\t"
24301 + __asm__("movw %w6, %%ds\n\t"
24302 + "lcall *%%ss:(%%esi); cld\n\t"
24303 + "push %%ss\n\t"
24304 + "pop %%ds\n\t"
24305 "jc 1f\n\t"
24306 "xor %%ah, %%ah\n"
24307 "1:"
24308 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24309 : "1" (PCIBIOS_READ_CONFIG_WORD),
24310 "b" (bx),
24311 "D" ((long)reg),
24312 - "S" (&pci_indirect));
24313 + "S" (&pci_indirect),
24314 + "r" (__PCIBIOS_DS));
24315 /*
24316 * Zero-extend the result beyond 16 bits, do not trust the
24317 * BIOS having done it:
24318 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24319 *value &= 0xffff;
24320 break;
24321 case 4:
24322 - __asm__("lcall *(%%esi); cld\n\t"
24323 + __asm__("movw %w6, %%ds\n\t"
24324 + "lcall *%%ss:(%%esi); cld\n\t"
24325 + "push %%ss\n\t"
24326 + "pop %%ds\n\t"
24327 "jc 1f\n\t"
24328 "xor %%ah, %%ah\n"
24329 "1:"
24330 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24331 : "1" (PCIBIOS_READ_CONFIG_DWORD),
24332 "b" (bx),
24333 "D" ((long)reg),
24334 - "S" (&pci_indirect));
24335 + "S" (&pci_indirect),
24336 + "r" (__PCIBIOS_DS));
24337 break;
24338 }
24339
24340 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24341
24342 switch (len) {
24343 case 1:
24344 - __asm__("lcall *(%%esi); cld\n\t"
24345 + __asm__("movw %w6, %%ds\n\t"
24346 + "lcall *%%ss:(%%esi); cld\n\t"
24347 + "push %%ss\n\t"
24348 + "pop %%ds\n\t"
24349 "jc 1f\n\t"
24350 "xor %%ah, %%ah\n"
24351 "1:"
24352 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24353 "c" (value),
24354 "b" (bx),
24355 "D" ((long)reg),
24356 - "S" (&pci_indirect));
24357 + "S" (&pci_indirect),
24358 + "r" (__PCIBIOS_DS));
24359 break;
24360 case 2:
24361 - __asm__("lcall *(%%esi); cld\n\t"
24362 + __asm__("movw %w6, %%ds\n\t"
24363 + "lcall *%%ss:(%%esi); cld\n\t"
24364 + "push %%ss\n\t"
24365 + "pop %%ds\n\t"
24366 "jc 1f\n\t"
24367 "xor %%ah, %%ah\n"
24368 "1:"
24369 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24370 "c" (value),
24371 "b" (bx),
24372 "D" ((long)reg),
24373 - "S" (&pci_indirect));
24374 + "S" (&pci_indirect),
24375 + "r" (__PCIBIOS_DS));
24376 break;
24377 case 4:
24378 - __asm__("lcall *(%%esi); cld\n\t"
24379 + __asm__("movw %w6, %%ds\n\t"
24380 + "lcall *%%ss:(%%esi); cld\n\t"
24381 + "push %%ss\n\t"
24382 + "pop %%ds\n\t"
24383 "jc 1f\n\t"
24384 "xor %%ah, %%ah\n"
24385 "1:"
24386 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24387 "c" (value),
24388 "b" (bx),
24389 "D" ((long)reg),
24390 - "S" (&pci_indirect));
24391 + "S" (&pci_indirect),
24392 + "r" (__PCIBIOS_DS));
24393 break;
24394 }
24395
24396 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24397
24398 DBG("PCI: Fetching IRQ routing table... ");
24399 __asm__("push %%es\n\t"
24400 + "movw %w8, %%ds\n\t"
24401 "push %%ds\n\t"
24402 "pop %%es\n\t"
24403 - "lcall *(%%esi); cld\n\t"
24404 + "lcall *%%ss:(%%esi); cld\n\t"
24405 "pop %%es\n\t"
24406 + "push %%ss\n\t"
24407 + "pop %%ds\n"
24408 "jc 1f\n\t"
24409 "xor %%ah, %%ah\n"
24410 "1:"
24411 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24412 "1" (0),
24413 "D" ((long) &opt),
24414 "S" (&pci_indirect),
24415 - "m" (opt)
24416 + "m" (opt),
24417 + "r" (__PCIBIOS_DS)
24418 : "memory");
24419 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24420 if (ret & 0xff00)
24421 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24422 {
24423 int ret;
24424
24425 - __asm__("lcall *(%%esi); cld\n\t"
24426 + __asm__("movw %w5, %%ds\n\t"
24427 + "lcall *%%ss:(%%esi); cld\n\t"
24428 + "push %%ss\n\t"
24429 + "pop %%ds\n"
24430 "jc 1f\n\t"
24431 "xor %%ah, %%ah\n"
24432 "1:"
24433 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24434 : "0" (PCIBIOS_SET_PCI_HW_INT),
24435 "b" ((dev->bus->number << 8) | dev->devfn),
24436 "c" ((irq << 8) | (pin + 10)),
24437 - "S" (&pci_indirect));
24438 + "S" (&pci_indirect),
24439 + "r" (__PCIBIOS_DS));
24440 return !(ret & 0xff00);
24441 }
24442 EXPORT_SYMBOL(pcibios_set_irq_routing);
24443 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24444 index 40e4469..1ab536e 100644
24445 --- a/arch/x86/platform/efi/efi_32.c
24446 +++ b/arch/x86/platform/efi/efi_32.c
24447 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
24448 {
24449 struct desc_ptr gdt_descr;
24450
24451 +#ifdef CONFIG_PAX_KERNEXEC
24452 + struct desc_struct d;
24453 +#endif
24454 +
24455 local_irq_save(efi_rt_eflags);
24456
24457 load_cr3(initial_page_table);
24458 __flush_tlb_all();
24459
24460 +#ifdef CONFIG_PAX_KERNEXEC
24461 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24462 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24463 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24464 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24465 +#endif
24466 +
24467 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24468 gdt_descr.size = GDT_SIZE - 1;
24469 load_gdt(&gdt_descr);
24470 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
24471 {
24472 struct desc_ptr gdt_descr;
24473
24474 +#ifdef CONFIG_PAX_KERNEXEC
24475 + struct desc_struct d;
24476 +
24477 + memset(&d, 0, sizeof d);
24478 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24479 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24480 +#endif
24481 +
24482 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24483 gdt_descr.size = GDT_SIZE - 1;
24484 load_gdt(&gdt_descr);
24485 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24486 index fbe66e6..c5c0dd2 100644
24487 --- a/arch/x86/platform/efi/efi_stub_32.S
24488 +++ b/arch/x86/platform/efi/efi_stub_32.S
24489 @@ -6,7 +6,9 @@
24490 */
24491
24492 #include <linux/linkage.h>
24493 +#include <linux/init.h>
24494 #include <asm/page_types.h>
24495 +#include <asm/segment.h>
24496
24497 /*
24498 * efi_call_phys(void *, ...) is a function with variable parameters.
24499 @@ -20,7 +22,7 @@
24500 * service functions will comply with gcc calling convention, too.
24501 */
24502
24503 -.text
24504 +__INIT
24505 ENTRY(efi_call_phys)
24506 /*
24507 * 0. The function can only be called in Linux kernel. So CS has been
24508 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24509 * The mapping of lower virtual memory has been created in prelog and
24510 * epilog.
24511 */
24512 - movl $1f, %edx
24513 - subl $__PAGE_OFFSET, %edx
24514 - jmp *%edx
24515 + movl $(__KERNEXEC_EFI_DS), %edx
24516 + mov %edx, %ds
24517 + mov %edx, %es
24518 + mov %edx, %ss
24519 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24520 1:
24521
24522 /*
24523 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24524 * parameter 2, ..., param n. To make things easy, we save the return
24525 * address of efi_call_phys in a global variable.
24526 */
24527 - popl %edx
24528 - movl %edx, saved_return_addr
24529 - /* get the function pointer into ECX*/
24530 - popl %ecx
24531 - movl %ecx, efi_rt_function_ptr
24532 - movl $2f, %edx
24533 - subl $__PAGE_OFFSET, %edx
24534 - pushl %edx
24535 + popl (saved_return_addr)
24536 + popl (efi_rt_function_ptr)
24537
24538 /*
24539 * 3. Clear PG bit in %CR0.
24540 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24541 /*
24542 * 5. Call the physical function.
24543 */
24544 - jmp *%ecx
24545 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
24546
24547 -2:
24548 /*
24549 * 6. After EFI runtime service returns, control will return to
24550 * following instruction. We'd better readjust stack pointer first.
24551 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24552 movl %cr0, %edx
24553 orl $0x80000000, %edx
24554 movl %edx, %cr0
24555 - jmp 1f
24556 -1:
24557 +
24558 /*
24559 * 8. Now restore the virtual mode from flat mode by
24560 * adding EIP with PAGE_OFFSET.
24561 */
24562 - movl $1f, %edx
24563 - jmp *%edx
24564 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24565 1:
24566 + movl $(__KERNEL_DS), %edx
24567 + mov %edx, %ds
24568 + mov %edx, %es
24569 + mov %edx, %ss
24570
24571 /*
24572 * 9. Balance the stack. And because EAX contain the return value,
24573 * we'd better not clobber it.
24574 */
24575 - leal efi_rt_function_ptr, %edx
24576 - movl (%edx), %ecx
24577 - pushl %ecx
24578 + pushl (efi_rt_function_ptr)
24579
24580 /*
24581 - * 10. Push the saved return address onto the stack and return.
24582 + * 10. Return to the saved return address.
24583 */
24584 - leal saved_return_addr, %edx
24585 - movl (%edx), %ecx
24586 - pushl %ecx
24587 - ret
24588 + jmpl *(saved_return_addr)
24589 ENDPROC(efi_call_phys)
24590 .previous
24591
24592 -.data
24593 +__INITDATA
24594 saved_return_addr:
24595 .long 0
24596 efi_rt_function_ptr:
24597 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24598 index 4c07cca..2c8427d 100644
24599 --- a/arch/x86/platform/efi/efi_stub_64.S
24600 +++ b/arch/x86/platform/efi/efi_stub_64.S
24601 @@ -7,6 +7,7 @@
24602 */
24603
24604 #include <linux/linkage.h>
24605 +#include <asm/alternative-asm.h>
24606
24607 #define SAVE_XMM \
24608 mov %rsp, %rax; \
24609 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
24610 call *%rdi
24611 addq $32, %rsp
24612 RESTORE_XMM
24613 + pax_force_retaddr 0, 1
24614 ret
24615 ENDPROC(efi_call0)
24616
24617 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
24618 call *%rdi
24619 addq $32, %rsp
24620 RESTORE_XMM
24621 + pax_force_retaddr 0, 1
24622 ret
24623 ENDPROC(efi_call1)
24624
24625 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
24626 call *%rdi
24627 addq $32, %rsp
24628 RESTORE_XMM
24629 + pax_force_retaddr 0, 1
24630 ret
24631 ENDPROC(efi_call2)
24632
24633 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
24634 call *%rdi
24635 addq $32, %rsp
24636 RESTORE_XMM
24637 + pax_force_retaddr 0, 1
24638 ret
24639 ENDPROC(efi_call3)
24640
24641 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
24642 call *%rdi
24643 addq $32, %rsp
24644 RESTORE_XMM
24645 + pax_force_retaddr 0, 1
24646 ret
24647 ENDPROC(efi_call4)
24648
24649 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
24650 call *%rdi
24651 addq $48, %rsp
24652 RESTORE_XMM
24653 + pax_force_retaddr 0, 1
24654 ret
24655 ENDPROC(efi_call5)
24656
24657 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
24658 call *%rdi
24659 addq $48, %rsp
24660 RESTORE_XMM
24661 + pax_force_retaddr 0, 1
24662 ret
24663 ENDPROC(efi_call6)
24664 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24665 index ad4ec1c..686479e 100644
24666 --- a/arch/x86/platform/mrst/mrst.c
24667 +++ b/arch/x86/platform/mrst/mrst.c
24668 @@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
24669 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
24670 int sfi_mrtc_num;
24671
24672 -static void mrst_power_off(void)
24673 +static __noreturn void mrst_power_off(void)
24674 {
24675 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24676 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
24677 + BUG();
24678 }
24679
24680 -static void mrst_reboot(void)
24681 +static __noreturn void mrst_reboot(void)
24682 {
24683 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24684 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
24685 else
24686 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
24687 + BUG();
24688 }
24689
24690 /* parse all the mtimer info to a static mtimer array */
24691 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24692 index f10c0af..3ec1f95 100644
24693 --- a/arch/x86/power/cpu.c
24694 +++ b/arch/x86/power/cpu.c
24695 @@ -131,7 +131,7 @@ static void do_fpu_end(void)
24696 static void fix_processor_context(void)
24697 {
24698 int cpu = smp_processor_id();
24699 - struct tss_struct *t = &per_cpu(init_tss, cpu);
24700 + struct tss_struct *t = init_tss + cpu;
24701
24702 set_tss_desc(cpu, t); /*
24703 * This just modifies memory; should not be
24704 @@ -141,7 +141,9 @@ static void fix_processor_context(void)
24705 */
24706
24707 #ifdef CONFIG_X86_64
24708 + pax_open_kernel();
24709 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24710 + pax_close_kernel();
24711
24712 syscall_init(); /* This sets MSR_*STAR and related */
24713 #endif
24714 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24715 index 5d17950..2253fc9 100644
24716 --- a/arch/x86/vdso/Makefile
24717 +++ b/arch/x86/vdso/Makefile
24718 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24719 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24720 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24721
24722 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24723 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24724 GCOV_PROFILE := n
24725
24726 #
24727 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24728 index 468d591..8e80a0a 100644
24729 --- a/arch/x86/vdso/vdso32-setup.c
24730 +++ b/arch/x86/vdso/vdso32-setup.c
24731 @@ -25,6 +25,7 @@
24732 #include <asm/tlbflush.h>
24733 #include <asm/vdso.h>
24734 #include <asm/proto.h>
24735 +#include <asm/mman.h>
24736
24737 enum {
24738 VDSO_DISABLED = 0,
24739 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24740 void enable_sep_cpu(void)
24741 {
24742 int cpu = get_cpu();
24743 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
24744 + struct tss_struct *tss = init_tss + cpu;
24745
24746 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24747 put_cpu();
24748 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24749 gate_vma.vm_start = FIXADDR_USER_START;
24750 gate_vma.vm_end = FIXADDR_USER_END;
24751 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24752 - gate_vma.vm_page_prot = __P101;
24753 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24754 /*
24755 * Make sure the vDSO gets into every core dump.
24756 * Dumping its contents makes post-mortem fully interpretable later
24757 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24758 if (compat)
24759 addr = VDSO_HIGH_BASE;
24760 else {
24761 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24762 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24763 if (IS_ERR_VALUE(addr)) {
24764 ret = addr;
24765 goto up_fail;
24766 }
24767 }
24768
24769 - current->mm->context.vdso = (void *)addr;
24770 + current->mm->context.vdso = addr;
24771
24772 if (compat_uses_vma || !compat) {
24773 /*
24774 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24775 }
24776
24777 current_thread_info()->sysenter_return =
24778 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24779 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24780
24781 up_fail:
24782 if (ret)
24783 - current->mm->context.vdso = NULL;
24784 + current->mm->context.vdso = 0;
24785
24786 up_write(&mm->mmap_sem);
24787
24788 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24789
24790 const char *arch_vma_name(struct vm_area_struct *vma)
24791 {
24792 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24793 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24794 return "[vdso]";
24795 +
24796 +#ifdef CONFIG_PAX_SEGMEXEC
24797 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24798 + return "[vdso]";
24799 +#endif
24800 +
24801 return NULL;
24802 }
24803
24804 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24805 * Check to see if the corresponding task was created in compat vdso
24806 * mode.
24807 */
24808 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24809 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24810 return &gate_vma;
24811 return NULL;
24812 }
24813 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24814 index 153407c..611cba9 100644
24815 --- a/arch/x86/vdso/vma.c
24816 +++ b/arch/x86/vdso/vma.c
24817 @@ -16,8 +16,6 @@
24818 #include <asm/vdso.h>
24819 #include <asm/page.h>
24820
24821 -unsigned int __read_mostly vdso_enabled = 1;
24822 -
24823 extern char vdso_start[], vdso_end[];
24824 extern unsigned short vdso_sync_cpuid;
24825
24826 @@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24827 * unaligned here as a result of stack start randomization.
24828 */
24829 addr = PAGE_ALIGN(addr);
24830 - addr = align_addr(addr, NULL, ALIGN_VDSO);
24831
24832 return addr;
24833 }
24834 @@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24835 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24836 {
24837 struct mm_struct *mm = current->mm;
24838 - unsigned long addr;
24839 + unsigned long addr = 0;
24840 int ret;
24841
24842 - if (!vdso_enabled)
24843 - return 0;
24844 -
24845 down_write(&mm->mmap_sem);
24846 +
24847 +#ifdef CONFIG_PAX_RANDMMAP
24848 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24849 +#endif
24850 +
24851 addr = vdso_addr(mm->start_stack, vdso_size);
24852 + addr = align_addr(addr, NULL, ALIGN_VDSO);
24853 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24854 if (IS_ERR_VALUE(addr)) {
24855 ret = addr;
24856 goto up_fail;
24857 }
24858
24859 - current->mm->context.vdso = (void *)addr;
24860 + mm->context.vdso = addr;
24861
24862 ret = install_special_mapping(mm, addr, vdso_size,
24863 VM_READ|VM_EXEC|
24864 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24865 VM_ALWAYSDUMP,
24866 vdso_pages);
24867 - if (ret) {
24868 - current->mm->context.vdso = NULL;
24869 - goto up_fail;
24870 - }
24871 +
24872 + if (ret)
24873 + mm->context.vdso = 0;
24874
24875 up_fail:
24876 up_write(&mm->mmap_sem);
24877 return ret;
24878 }
24879 -
24880 -static __init int vdso_setup(char *s)
24881 -{
24882 - vdso_enabled = simple_strtoul(s, NULL, 0);
24883 - return 0;
24884 -}
24885 -__setup("vdso=", vdso_setup);
24886 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24887 index 1f92865..c843b20 100644
24888 --- a/arch/x86/xen/enlighten.c
24889 +++ b/arch/x86/xen/enlighten.c
24890 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24891
24892 struct shared_info xen_dummy_shared_info;
24893
24894 -void *xen_initial_gdt;
24895 -
24896 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24897 __read_mostly int xen_have_vector_callback;
24898 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24899 @@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24900 #endif
24901 };
24902
24903 -static void xen_reboot(int reason)
24904 +static __noreturn void xen_reboot(int reason)
24905 {
24906 struct sched_shutdown r = { .reason = reason };
24907
24908 @@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
24909 BUG();
24910 }
24911
24912 -static void xen_restart(char *msg)
24913 +static __noreturn void xen_restart(char *msg)
24914 {
24915 xen_reboot(SHUTDOWN_reboot);
24916 }
24917
24918 -static void xen_emergency_restart(void)
24919 +static __noreturn void xen_emergency_restart(void)
24920 {
24921 xen_reboot(SHUTDOWN_reboot);
24922 }
24923
24924 -static void xen_machine_halt(void)
24925 +static __noreturn void xen_machine_halt(void)
24926 {
24927 xen_reboot(SHUTDOWN_poweroff);
24928 }
24929 @@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
24930 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24931
24932 /* Work out if we support NX */
24933 - x86_configure_nx();
24934 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24935 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24936 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24937 + unsigned l, h;
24938 +
24939 + __supported_pte_mask |= _PAGE_NX;
24940 + rdmsr(MSR_EFER, l, h);
24941 + l |= EFER_NX;
24942 + wrmsr(MSR_EFER, l, h);
24943 + }
24944 +#endif
24945
24946 xen_setup_features();
24947
24948 @@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
24949
24950 machine_ops = xen_machine_ops;
24951
24952 - /*
24953 - * The only reliable way to retain the initial address of the
24954 - * percpu gdt_page is to remember it here, so we can go and
24955 - * mark it RW later, when the initial percpu area is freed.
24956 - */
24957 - xen_initial_gdt = &per_cpu(gdt_page, 0);
24958 -
24959 xen_smp_init();
24960
24961 #ifdef CONFIG_ACPI_NUMA
24962 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24963 index 87f6673..e2555a6 100644
24964 --- a/arch/x86/xen/mmu.c
24965 +++ b/arch/x86/xen/mmu.c
24966 @@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24967 convert_pfn_mfn(init_level4_pgt);
24968 convert_pfn_mfn(level3_ident_pgt);
24969 convert_pfn_mfn(level3_kernel_pgt);
24970 + convert_pfn_mfn(level3_vmalloc_start_pgt);
24971 + convert_pfn_mfn(level3_vmalloc_end_pgt);
24972 + convert_pfn_mfn(level3_vmemmap_pgt);
24973
24974 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24975 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24976 @@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24977 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24978 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24979 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24980 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
24981 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
24982 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24983 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24984 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24985 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24986 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24987
24988 @@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
24989 pv_mmu_ops.set_pud = xen_set_pud;
24990 #if PAGETABLE_LEVELS == 4
24991 pv_mmu_ops.set_pgd = xen_set_pgd;
24992 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24993 #endif
24994
24995 /* This will work as long as patching hasn't happened yet
24996 @@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24997 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24998 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24999 .set_pgd = xen_set_pgd_hyper,
25000 + .set_pgd_batched = xen_set_pgd_hyper,
25001
25002 .alloc_pud = xen_alloc_pmd_init,
25003 .release_pud = xen_release_pmd_init,
25004 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
25005 index 041d4fe..7666b7e 100644
25006 --- a/arch/x86/xen/smp.c
25007 +++ b/arch/x86/xen/smp.c
25008 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
25009 {
25010 BUG_ON(smp_processor_id() != 0);
25011 native_smp_prepare_boot_cpu();
25012 -
25013 - /* We've switched to the "real" per-cpu gdt, so make sure the
25014 - old memory can be recycled */
25015 - make_lowmem_page_readwrite(xen_initial_gdt);
25016 -
25017 xen_filter_cpu_maps();
25018 xen_setup_vcpu_info_placement();
25019 }
25020 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
25021 gdt = get_cpu_gdt_table(cpu);
25022
25023 ctxt->flags = VGCF_IN_KERNEL;
25024 - ctxt->user_regs.ds = __USER_DS;
25025 - ctxt->user_regs.es = __USER_DS;
25026 + ctxt->user_regs.ds = __KERNEL_DS;
25027 + ctxt->user_regs.es = __KERNEL_DS;
25028 ctxt->user_regs.ss = __KERNEL_DS;
25029 #ifdef CONFIG_X86_32
25030 ctxt->user_regs.fs = __KERNEL_PERCPU;
25031 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
25032 + savesegment(gs, ctxt->user_regs.gs);
25033 #else
25034 ctxt->gs_base_kernel = per_cpu_offset(cpu);
25035 #endif
25036 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
25037 int rc;
25038
25039 per_cpu(current_task, cpu) = idle;
25040 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
25041 #ifdef CONFIG_X86_32
25042 irq_ctx_init(cpu);
25043 #else
25044 clear_tsk_thread_flag(idle, TIF_FORK);
25045 - per_cpu(kernel_stack, cpu) =
25046 - (unsigned long)task_stack_page(idle) -
25047 - KERNEL_STACK_OFFSET + THREAD_SIZE;
25048 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
25049 #endif
25050 xen_setup_runstate_info(cpu);
25051 xen_setup_timer(cpu);
25052 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
25053 index b040b0e..8cc4fe0 100644
25054 --- a/arch/x86/xen/xen-asm_32.S
25055 +++ b/arch/x86/xen/xen-asm_32.S
25056 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
25057 ESP_OFFSET=4 # bytes pushed onto stack
25058
25059 /*
25060 - * Store vcpu_info pointer for easy access. Do it this way to
25061 - * avoid having to reload %fs
25062 + * Store vcpu_info pointer for easy access.
25063 */
25064 #ifdef CONFIG_SMP
25065 - GET_THREAD_INFO(%eax)
25066 - movl TI_cpu(%eax), %eax
25067 - movl __per_cpu_offset(,%eax,4), %eax
25068 - mov xen_vcpu(%eax), %eax
25069 + push %fs
25070 + mov $(__KERNEL_PERCPU), %eax
25071 + mov %eax, %fs
25072 + mov PER_CPU_VAR(xen_vcpu), %eax
25073 + pop %fs
25074 #else
25075 movl xen_vcpu, %eax
25076 #endif
25077 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
25078 index aaa7291..3f77960 100644
25079 --- a/arch/x86/xen/xen-head.S
25080 +++ b/arch/x86/xen/xen-head.S
25081 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
25082 #ifdef CONFIG_X86_32
25083 mov %esi,xen_start_info
25084 mov $init_thread_union+THREAD_SIZE,%esp
25085 +#ifdef CONFIG_SMP
25086 + movl $cpu_gdt_table,%edi
25087 + movl $__per_cpu_load,%eax
25088 + movw %ax,__KERNEL_PERCPU + 2(%edi)
25089 + rorl $16,%eax
25090 + movb %al,__KERNEL_PERCPU + 4(%edi)
25091 + movb %ah,__KERNEL_PERCPU + 7(%edi)
25092 + movl $__per_cpu_end - 1,%eax
25093 + subl $__per_cpu_start,%eax
25094 + movw %ax,__KERNEL_PERCPU + 0(%edi)
25095 +#endif
25096 #else
25097 mov %rsi,xen_start_info
25098 mov $init_thread_union+THREAD_SIZE,%rsp
25099 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
25100 index b095739..8c17bcd 100644
25101 --- a/arch/x86/xen/xen-ops.h
25102 +++ b/arch/x86/xen/xen-ops.h
25103 @@ -10,8 +10,6 @@
25104 extern const char xen_hypervisor_callback[];
25105 extern const char xen_failsafe_callback[];
25106
25107 -extern void *xen_initial_gdt;
25108 -
25109 struct trap_info;
25110 void xen_copy_trap_info(struct trap_info *traps);
25111
25112 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
25113 index 58916af..9cb880b 100644
25114 --- a/block/blk-iopoll.c
25115 +++ b/block/blk-iopoll.c
25116 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
25117 }
25118 EXPORT_SYMBOL(blk_iopoll_complete);
25119
25120 -static void blk_iopoll_softirq(struct softirq_action *h)
25121 +static void blk_iopoll_softirq(void)
25122 {
25123 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
25124 int rearm = 0, budget = blk_iopoll_budget;
25125 diff --git a/block/blk-map.c b/block/blk-map.c
25126 index 623e1cd..ca1e109 100644
25127 --- a/block/blk-map.c
25128 +++ b/block/blk-map.c
25129 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
25130 if (!len || !kbuf)
25131 return -EINVAL;
25132
25133 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
25134 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
25135 if (do_copy)
25136 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
25137 else
25138 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
25139 index 1366a89..e17f54b 100644
25140 --- a/block/blk-softirq.c
25141 +++ b/block/blk-softirq.c
25142 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
25143 * Softirq action handler - move entries to local list and loop over them
25144 * while passing them to the queue registered handler.
25145 */
25146 -static void blk_done_softirq(struct softirq_action *h)
25147 +static void blk_done_softirq(void)
25148 {
25149 struct list_head *cpu_list, local_list;
25150
25151 diff --git a/block/bsg.c b/block/bsg.c
25152 index 702f131..37808bf 100644
25153 --- a/block/bsg.c
25154 +++ b/block/bsg.c
25155 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
25156 struct sg_io_v4 *hdr, struct bsg_device *bd,
25157 fmode_t has_write_perm)
25158 {
25159 + unsigned char tmpcmd[sizeof(rq->__cmd)];
25160 + unsigned char *cmdptr;
25161 +
25162 if (hdr->request_len > BLK_MAX_CDB) {
25163 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
25164 if (!rq->cmd)
25165 return -ENOMEM;
25166 - }
25167 + cmdptr = rq->cmd;
25168 + } else
25169 + cmdptr = tmpcmd;
25170
25171 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
25172 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
25173 hdr->request_len))
25174 return -EFAULT;
25175
25176 + if (cmdptr != rq->cmd)
25177 + memcpy(rq->cmd, cmdptr, hdr->request_len);
25178 +
25179 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
25180 if (blk_verify_command(rq->cmd, has_write_perm))
25181 return -EPERM;
25182 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
25183 index 7b72502..646105c 100644
25184 --- a/block/compat_ioctl.c
25185 +++ b/block/compat_ioctl.c
25186 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
25187 err |= __get_user(f->spec1, &uf->spec1);
25188 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
25189 err |= __get_user(name, &uf->name);
25190 - f->name = compat_ptr(name);
25191 + f->name = (void __force_kernel *)compat_ptr(name);
25192 if (err) {
25193 err = -EFAULT;
25194 goto out;
25195 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
25196 index 688be8a..8a37d98 100644
25197 --- a/block/scsi_ioctl.c
25198 +++ b/block/scsi_ioctl.c
25199 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
25200 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
25201 struct sg_io_hdr *hdr, fmode_t mode)
25202 {
25203 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
25204 + unsigned char tmpcmd[sizeof(rq->__cmd)];
25205 + unsigned char *cmdptr;
25206 +
25207 + if (rq->cmd != rq->__cmd)
25208 + cmdptr = rq->cmd;
25209 + else
25210 + cmdptr = tmpcmd;
25211 +
25212 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
25213 return -EFAULT;
25214 +
25215 + if (cmdptr != rq->cmd)
25216 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
25217 +
25218 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
25219 return -EPERM;
25220
25221 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
25222 int err;
25223 unsigned int in_len, out_len, bytes, opcode, cmdlen;
25224 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
25225 + unsigned char tmpcmd[sizeof(rq->__cmd)];
25226 + unsigned char *cmdptr;
25227
25228 if (!sic)
25229 return -EINVAL;
25230 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
25231 */
25232 err = -EFAULT;
25233 rq->cmd_len = cmdlen;
25234 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
25235 +
25236 + if (rq->cmd != rq->__cmd)
25237 + cmdptr = rq->cmd;
25238 + else
25239 + cmdptr = tmpcmd;
25240 +
25241 + if (copy_from_user(cmdptr, sic->data, cmdlen))
25242 goto error;
25243
25244 + if (rq->cmd != cmdptr)
25245 + memcpy(rq->cmd, cmdptr, cmdlen);
25246 +
25247 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
25248 goto error;
25249
25250 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
25251 index 671d4d6..5f24030 100644
25252 --- a/crypto/cryptd.c
25253 +++ b/crypto/cryptd.c
25254 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
25255
25256 struct cryptd_blkcipher_request_ctx {
25257 crypto_completion_t complete;
25258 -};
25259 +} __no_const;
25260
25261 struct cryptd_hash_ctx {
25262 struct crypto_shash *child;
25263 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
25264
25265 struct cryptd_aead_request_ctx {
25266 crypto_completion_t complete;
25267 -};
25268 +} __no_const;
25269
25270 static void cryptd_queue_worker(struct work_struct *work);
25271
25272 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
25273 index 5d41894..22021e4 100644
25274 --- a/drivers/acpi/apei/cper.c
25275 +++ b/drivers/acpi/apei/cper.c
25276 @@ -38,12 +38,12 @@
25277 */
25278 u64 cper_next_record_id(void)
25279 {
25280 - static atomic64_t seq;
25281 + static atomic64_unchecked_t seq;
25282
25283 - if (!atomic64_read(&seq))
25284 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
25285 + if (!atomic64_read_unchecked(&seq))
25286 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
25287
25288 - return atomic64_inc_return(&seq);
25289 + return atomic64_inc_return_unchecked(&seq);
25290 }
25291 EXPORT_SYMBOL_GPL(cper_next_record_id);
25292
25293 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
25294 index 6c47ae9..abfdd63 100644
25295 --- a/drivers/acpi/ec_sys.c
25296 +++ b/drivers/acpi/ec_sys.c
25297 @@ -12,6 +12,7 @@
25298 #include <linux/acpi.h>
25299 #include <linux/debugfs.h>
25300 #include <linux/module.h>
25301 +#include <linux/uaccess.h>
25302 #include "internal.h"
25303
25304 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
25305 @@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25306 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25307 */
25308 unsigned int size = EC_SPACE_SIZE;
25309 - u8 *data = (u8 *) buf;
25310 + u8 data;
25311 loff_t init_off = *off;
25312 int err = 0;
25313
25314 @@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25315 size = count;
25316
25317 while (size) {
25318 - err = ec_read(*off, &data[*off - init_off]);
25319 + err = ec_read(*off, &data);
25320 if (err)
25321 return err;
25322 + if (put_user(data, &buf[*off - init_off]))
25323 + return -EFAULT;
25324 *off += 1;
25325 size--;
25326 }
25327 @@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25328
25329 unsigned int size = count;
25330 loff_t init_off = *off;
25331 - u8 *data = (u8 *) buf;
25332 int err = 0;
25333
25334 if (*off >= EC_SPACE_SIZE)
25335 @@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25336 }
25337
25338 while (size) {
25339 - u8 byte_write = data[*off - init_off];
25340 + u8 byte_write;
25341 + if (get_user(byte_write, &buf[*off - init_off]))
25342 + return -EFAULT;
25343 err = ec_write(*off, byte_write);
25344 if (err)
25345 return err;
25346 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25347 index 251c7b62..000462d 100644
25348 --- a/drivers/acpi/proc.c
25349 +++ b/drivers/acpi/proc.c
25350 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
25351 size_t count, loff_t * ppos)
25352 {
25353 struct list_head *node, *next;
25354 - char strbuf[5];
25355 - char str[5] = "";
25356 - unsigned int len = count;
25357 + char strbuf[5] = {0};
25358
25359 - if (len > 4)
25360 - len = 4;
25361 - if (len < 0)
25362 + if (count > 4)
25363 + count = 4;
25364 + if (copy_from_user(strbuf, buffer, count))
25365 return -EFAULT;
25366 -
25367 - if (copy_from_user(strbuf, buffer, len))
25368 - return -EFAULT;
25369 - strbuf[len] = '\0';
25370 - sscanf(strbuf, "%s", str);
25371 + strbuf[count] = '\0';
25372
25373 mutex_lock(&acpi_device_lock);
25374 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25375 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
25376 if (!dev->wakeup.flags.valid)
25377 continue;
25378
25379 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
25380 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25381 if (device_can_wakeup(&dev->dev)) {
25382 bool enable = !device_may_wakeup(&dev->dev);
25383 device_set_wakeup_enable(&dev->dev, enable);
25384 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25385 index 9d7bc9f..a6fc091 100644
25386 --- a/drivers/acpi/processor_driver.c
25387 +++ b/drivers/acpi/processor_driver.c
25388 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25389 return 0;
25390 #endif
25391
25392 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25393 + BUG_ON(pr->id >= nr_cpu_ids);
25394
25395 /*
25396 * Buggy BIOS check
25397 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25398 index c04ad68..0b99473 100644
25399 --- a/drivers/ata/libata-core.c
25400 +++ b/drivers/ata/libata-core.c
25401 @@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25402 struct ata_port *ap;
25403 unsigned int tag;
25404
25405 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25406 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25407 ap = qc->ap;
25408
25409 qc->flags = 0;
25410 @@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25411 struct ata_port *ap;
25412 struct ata_link *link;
25413
25414 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25415 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25416 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25417 ap = qc->ap;
25418 link = qc->dev->link;
25419 @@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25420 return;
25421
25422 spin_lock(&lock);
25423 + pax_open_kernel();
25424
25425 for (cur = ops->inherits; cur; cur = cur->inherits) {
25426 void **inherit = (void **)cur;
25427 @@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25428 if (IS_ERR(*pp))
25429 *pp = NULL;
25430
25431 - ops->inherits = NULL;
25432 + *(struct ata_port_operations **)&ops->inherits = NULL;
25433
25434 + pax_close_kernel();
25435 spin_unlock(&lock);
25436 }
25437
25438 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25439 index e8574bb..f9f6a72 100644
25440 --- a/drivers/ata/pata_arasan_cf.c
25441 +++ b/drivers/ata/pata_arasan_cf.c
25442 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25443 /* Handle platform specific quirks */
25444 if (pdata->quirk) {
25445 if (pdata->quirk & CF_BROKEN_PIO) {
25446 - ap->ops->set_piomode = NULL;
25447 + pax_open_kernel();
25448 + *(void **)&ap->ops->set_piomode = NULL;
25449 + pax_close_kernel();
25450 ap->pio_mask = 0;
25451 }
25452 if (pdata->quirk & CF_BROKEN_MWDMA)
25453 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25454 index f9b983a..887b9d8 100644
25455 --- a/drivers/atm/adummy.c
25456 +++ b/drivers/atm/adummy.c
25457 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25458 vcc->pop(vcc, skb);
25459 else
25460 dev_kfree_skb_any(skb);
25461 - atomic_inc(&vcc->stats->tx);
25462 + atomic_inc_unchecked(&vcc->stats->tx);
25463
25464 return 0;
25465 }
25466 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25467 index f8f41e0..1f987dd 100644
25468 --- a/drivers/atm/ambassador.c
25469 +++ b/drivers/atm/ambassador.c
25470 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25471 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25472
25473 // VC layer stats
25474 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25475 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25476
25477 // free the descriptor
25478 kfree (tx_descr);
25479 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25480 dump_skb ("<<<", vc, skb);
25481
25482 // VC layer stats
25483 - atomic_inc(&atm_vcc->stats->rx);
25484 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25485 __net_timestamp(skb);
25486 // end of our responsibility
25487 atm_vcc->push (atm_vcc, skb);
25488 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25489 } else {
25490 PRINTK (KERN_INFO, "dropped over-size frame");
25491 // should we count this?
25492 - atomic_inc(&atm_vcc->stats->rx_drop);
25493 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25494 }
25495
25496 } else {
25497 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25498 }
25499
25500 if (check_area (skb->data, skb->len)) {
25501 - atomic_inc(&atm_vcc->stats->tx_err);
25502 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25503 return -ENOMEM; // ?
25504 }
25505
25506 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25507 index b22d71c..d6e1049 100644
25508 --- a/drivers/atm/atmtcp.c
25509 +++ b/drivers/atm/atmtcp.c
25510 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25511 if (vcc->pop) vcc->pop(vcc,skb);
25512 else dev_kfree_skb(skb);
25513 if (dev_data) return 0;
25514 - atomic_inc(&vcc->stats->tx_err);
25515 + atomic_inc_unchecked(&vcc->stats->tx_err);
25516 return -ENOLINK;
25517 }
25518 size = skb->len+sizeof(struct atmtcp_hdr);
25519 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25520 if (!new_skb) {
25521 if (vcc->pop) vcc->pop(vcc,skb);
25522 else dev_kfree_skb(skb);
25523 - atomic_inc(&vcc->stats->tx_err);
25524 + atomic_inc_unchecked(&vcc->stats->tx_err);
25525 return -ENOBUFS;
25526 }
25527 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25528 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25529 if (vcc->pop) vcc->pop(vcc,skb);
25530 else dev_kfree_skb(skb);
25531 out_vcc->push(out_vcc,new_skb);
25532 - atomic_inc(&vcc->stats->tx);
25533 - atomic_inc(&out_vcc->stats->rx);
25534 + atomic_inc_unchecked(&vcc->stats->tx);
25535 + atomic_inc_unchecked(&out_vcc->stats->rx);
25536 return 0;
25537 }
25538
25539 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25540 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25541 read_unlock(&vcc_sklist_lock);
25542 if (!out_vcc) {
25543 - atomic_inc(&vcc->stats->tx_err);
25544 + atomic_inc_unchecked(&vcc->stats->tx_err);
25545 goto done;
25546 }
25547 skb_pull(skb,sizeof(struct atmtcp_hdr));
25548 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25549 __net_timestamp(new_skb);
25550 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25551 out_vcc->push(out_vcc,new_skb);
25552 - atomic_inc(&vcc->stats->tx);
25553 - atomic_inc(&out_vcc->stats->rx);
25554 + atomic_inc_unchecked(&vcc->stats->tx);
25555 + atomic_inc_unchecked(&out_vcc->stats->rx);
25556 done:
25557 if (vcc->pop) vcc->pop(vcc,skb);
25558 else dev_kfree_skb(skb);
25559 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25560 index 956e9ac..133516d 100644
25561 --- a/drivers/atm/eni.c
25562 +++ b/drivers/atm/eni.c
25563 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25564 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25565 vcc->dev->number);
25566 length = 0;
25567 - atomic_inc(&vcc->stats->rx_err);
25568 + atomic_inc_unchecked(&vcc->stats->rx_err);
25569 }
25570 else {
25571 length = ATM_CELL_SIZE-1; /* no HEC */
25572 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25573 size);
25574 }
25575 eff = length = 0;
25576 - atomic_inc(&vcc->stats->rx_err);
25577 + atomic_inc_unchecked(&vcc->stats->rx_err);
25578 }
25579 else {
25580 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25581 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25582 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25583 vcc->dev->number,vcc->vci,length,size << 2,descr);
25584 length = eff = 0;
25585 - atomic_inc(&vcc->stats->rx_err);
25586 + atomic_inc_unchecked(&vcc->stats->rx_err);
25587 }
25588 }
25589 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25590 @@ -771,7 +771,7 @@ rx_dequeued++;
25591 vcc->push(vcc,skb);
25592 pushed++;
25593 }
25594 - atomic_inc(&vcc->stats->rx);
25595 + atomic_inc_unchecked(&vcc->stats->rx);
25596 }
25597 wake_up(&eni_dev->rx_wait);
25598 }
25599 @@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
25600 PCI_DMA_TODEVICE);
25601 if (vcc->pop) vcc->pop(vcc,skb);
25602 else dev_kfree_skb_irq(skb);
25603 - atomic_inc(&vcc->stats->tx);
25604 + atomic_inc_unchecked(&vcc->stats->tx);
25605 wake_up(&eni_dev->tx_wait);
25606 dma_complete++;
25607 }
25608 @@ -1569,7 +1569,7 @@ tx_complete++;
25609 /*--------------------------------- entries ---------------------------------*/
25610
25611
25612 -static const char *media_name[] __devinitdata = {
25613 +static const char *media_name[] __devinitconst = {
25614 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25615 "UTP", "05?", "06?", "07?", /* 4- 7 */
25616 "TAXI","09?", "10?", "11?", /* 8-11 */
25617 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25618 index 5072f8a..fa52520d 100644
25619 --- a/drivers/atm/firestream.c
25620 +++ b/drivers/atm/firestream.c
25621 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25622 }
25623 }
25624
25625 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25626 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25627
25628 fs_dprintk (FS_DEBUG_TXMEM, "i");
25629 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25630 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25631 #endif
25632 skb_put (skb, qe->p1 & 0xffff);
25633 ATM_SKB(skb)->vcc = atm_vcc;
25634 - atomic_inc(&atm_vcc->stats->rx);
25635 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25636 __net_timestamp(skb);
25637 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25638 atm_vcc->push (atm_vcc, skb);
25639 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25640 kfree (pe);
25641 }
25642 if (atm_vcc)
25643 - atomic_inc(&atm_vcc->stats->rx_drop);
25644 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25645 break;
25646 case 0x1f: /* Reassembly abort: no buffers. */
25647 /* Silently increment error counter. */
25648 if (atm_vcc)
25649 - atomic_inc(&atm_vcc->stats->rx_drop);
25650 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25651 break;
25652 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25653 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25654 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25655 index 361f5ae..7fc552d 100644
25656 --- a/drivers/atm/fore200e.c
25657 +++ b/drivers/atm/fore200e.c
25658 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25659 #endif
25660 /* check error condition */
25661 if (*entry->status & STATUS_ERROR)
25662 - atomic_inc(&vcc->stats->tx_err);
25663 + atomic_inc_unchecked(&vcc->stats->tx_err);
25664 else
25665 - atomic_inc(&vcc->stats->tx);
25666 + atomic_inc_unchecked(&vcc->stats->tx);
25667 }
25668 }
25669
25670 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25671 if (skb == NULL) {
25672 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25673
25674 - atomic_inc(&vcc->stats->rx_drop);
25675 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25676 return -ENOMEM;
25677 }
25678
25679 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25680
25681 dev_kfree_skb_any(skb);
25682
25683 - atomic_inc(&vcc->stats->rx_drop);
25684 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25685 return -ENOMEM;
25686 }
25687
25688 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25689
25690 vcc->push(vcc, skb);
25691 - atomic_inc(&vcc->stats->rx);
25692 + atomic_inc_unchecked(&vcc->stats->rx);
25693
25694 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25695
25696 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25697 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25698 fore200e->atm_dev->number,
25699 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25700 - atomic_inc(&vcc->stats->rx_err);
25701 + atomic_inc_unchecked(&vcc->stats->rx_err);
25702 }
25703 }
25704
25705 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25706 goto retry_here;
25707 }
25708
25709 - atomic_inc(&vcc->stats->tx_err);
25710 + atomic_inc_unchecked(&vcc->stats->tx_err);
25711
25712 fore200e->tx_sat++;
25713 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25714 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25715 index 9a51df4..f3bb5f8 100644
25716 --- a/drivers/atm/he.c
25717 +++ b/drivers/atm/he.c
25718 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25719
25720 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25721 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25722 - atomic_inc(&vcc->stats->rx_drop);
25723 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25724 goto return_host_buffers;
25725 }
25726
25727 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25728 RBRQ_LEN_ERR(he_dev->rbrq_head)
25729 ? "LEN_ERR" : "",
25730 vcc->vpi, vcc->vci);
25731 - atomic_inc(&vcc->stats->rx_err);
25732 + atomic_inc_unchecked(&vcc->stats->rx_err);
25733 goto return_host_buffers;
25734 }
25735
25736 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25737 vcc->push(vcc, skb);
25738 spin_lock(&he_dev->global_lock);
25739
25740 - atomic_inc(&vcc->stats->rx);
25741 + atomic_inc_unchecked(&vcc->stats->rx);
25742
25743 return_host_buffers:
25744 ++pdus_assembled;
25745 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25746 tpd->vcc->pop(tpd->vcc, tpd->skb);
25747 else
25748 dev_kfree_skb_any(tpd->skb);
25749 - atomic_inc(&tpd->vcc->stats->tx_err);
25750 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25751 }
25752 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25753 return;
25754 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25755 vcc->pop(vcc, skb);
25756 else
25757 dev_kfree_skb_any(skb);
25758 - atomic_inc(&vcc->stats->tx_err);
25759 + atomic_inc_unchecked(&vcc->stats->tx_err);
25760 return -EINVAL;
25761 }
25762
25763 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25764 vcc->pop(vcc, skb);
25765 else
25766 dev_kfree_skb_any(skb);
25767 - atomic_inc(&vcc->stats->tx_err);
25768 + atomic_inc_unchecked(&vcc->stats->tx_err);
25769 return -EINVAL;
25770 }
25771 #endif
25772 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25773 vcc->pop(vcc, skb);
25774 else
25775 dev_kfree_skb_any(skb);
25776 - atomic_inc(&vcc->stats->tx_err);
25777 + atomic_inc_unchecked(&vcc->stats->tx_err);
25778 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25779 return -ENOMEM;
25780 }
25781 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25782 vcc->pop(vcc, skb);
25783 else
25784 dev_kfree_skb_any(skb);
25785 - atomic_inc(&vcc->stats->tx_err);
25786 + atomic_inc_unchecked(&vcc->stats->tx_err);
25787 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25788 return -ENOMEM;
25789 }
25790 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25791 __enqueue_tpd(he_dev, tpd, cid);
25792 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25793
25794 - atomic_inc(&vcc->stats->tx);
25795 + atomic_inc_unchecked(&vcc->stats->tx);
25796
25797 return 0;
25798 }
25799 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25800 index b812103..e391a49 100644
25801 --- a/drivers/atm/horizon.c
25802 +++ b/drivers/atm/horizon.c
25803 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25804 {
25805 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25806 // VC layer stats
25807 - atomic_inc(&vcc->stats->rx);
25808 + atomic_inc_unchecked(&vcc->stats->rx);
25809 __net_timestamp(skb);
25810 // end of our responsibility
25811 vcc->push (vcc, skb);
25812 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25813 dev->tx_iovec = NULL;
25814
25815 // VC layer stats
25816 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25817 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25818
25819 // free the skb
25820 hrz_kfree_skb (skb);
25821 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25822 index 1c05212..c28e200 100644
25823 --- a/drivers/atm/idt77252.c
25824 +++ b/drivers/atm/idt77252.c
25825 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25826 else
25827 dev_kfree_skb(skb);
25828
25829 - atomic_inc(&vcc->stats->tx);
25830 + atomic_inc_unchecked(&vcc->stats->tx);
25831 }
25832
25833 atomic_dec(&scq->used);
25834 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25835 if ((sb = dev_alloc_skb(64)) == NULL) {
25836 printk("%s: Can't allocate buffers for aal0.\n",
25837 card->name);
25838 - atomic_add(i, &vcc->stats->rx_drop);
25839 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25840 break;
25841 }
25842 if (!atm_charge(vcc, sb->truesize)) {
25843 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25844 card->name);
25845 - atomic_add(i - 1, &vcc->stats->rx_drop);
25846 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25847 dev_kfree_skb(sb);
25848 break;
25849 }
25850 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25851 ATM_SKB(sb)->vcc = vcc;
25852 __net_timestamp(sb);
25853 vcc->push(vcc, sb);
25854 - atomic_inc(&vcc->stats->rx);
25855 + atomic_inc_unchecked(&vcc->stats->rx);
25856
25857 cell += ATM_CELL_PAYLOAD;
25858 }
25859 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25860 "(CDC: %08x)\n",
25861 card->name, len, rpp->len, readl(SAR_REG_CDC));
25862 recycle_rx_pool_skb(card, rpp);
25863 - atomic_inc(&vcc->stats->rx_err);
25864 + atomic_inc_unchecked(&vcc->stats->rx_err);
25865 return;
25866 }
25867 if (stat & SAR_RSQE_CRC) {
25868 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25869 recycle_rx_pool_skb(card, rpp);
25870 - atomic_inc(&vcc->stats->rx_err);
25871 + atomic_inc_unchecked(&vcc->stats->rx_err);
25872 return;
25873 }
25874 if (skb_queue_len(&rpp->queue) > 1) {
25875 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25876 RXPRINTK("%s: Can't alloc RX skb.\n",
25877 card->name);
25878 recycle_rx_pool_skb(card, rpp);
25879 - atomic_inc(&vcc->stats->rx_err);
25880 + atomic_inc_unchecked(&vcc->stats->rx_err);
25881 return;
25882 }
25883 if (!atm_charge(vcc, skb->truesize)) {
25884 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25885 __net_timestamp(skb);
25886
25887 vcc->push(vcc, skb);
25888 - atomic_inc(&vcc->stats->rx);
25889 + atomic_inc_unchecked(&vcc->stats->rx);
25890
25891 return;
25892 }
25893 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25894 __net_timestamp(skb);
25895
25896 vcc->push(vcc, skb);
25897 - atomic_inc(&vcc->stats->rx);
25898 + atomic_inc_unchecked(&vcc->stats->rx);
25899
25900 if (skb->truesize > SAR_FB_SIZE_3)
25901 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25902 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25903 if (vcc->qos.aal != ATM_AAL0) {
25904 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25905 card->name, vpi, vci);
25906 - atomic_inc(&vcc->stats->rx_drop);
25907 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25908 goto drop;
25909 }
25910
25911 if ((sb = dev_alloc_skb(64)) == NULL) {
25912 printk("%s: Can't allocate buffers for AAL0.\n",
25913 card->name);
25914 - atomic_inc(&vcc->stats->rx_err);
25915 + atomic_inc_unchecked(&vcc->stats->rx_err);
25916 goto drop;
25917 }
25918
25919 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25920 ATM_SKB(sb)->vcc = vcc;
25921 __net_timestamp(sb);
25922 vcc->push(vcc, sb);
25923 - atomic_inc(&vcc->stats->rx);
25924 + atomic_inc_unchecked(&vcc->stats->rx);
25925
25926 drop:
25927 skb_pull(queue, 64);
25928 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25929
25930 if (vc == NULL) {
25931 printk("%s: NULL connection in send().\n", card->name);
25932 - atomic_inc(&vcc->stats->tx_err);
25933 + atomic_inc_unchecked(&vcc->stats->tx_err);
25934 dev_kfree_skb(skb);
25935 return -EINVAL;
25936 }
25937 if (!test_bit(VCF_TX, &vc->flags)) {
25938 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25939 - atomic_inc(&vcc->stats->tx_err);
25940 + atomic_inc_unchecked(&vcc->stats->tx_err);
25941 dev_kfree_skb(skb);
25942 return -EINVAL;
25943 }
25944 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25945 break;
25946 default:
25947 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25948 - atomic_inc(&vcc->stats->tx_err);
25949 + atomic_inc_unchecked(&vcc->stats->tx_err);
25950 dev_kfree_skb(skb);
25951 return -EINVAL;
25952 }
25953
25954 if (skb_shinfo(skb)->nr_frags != 0) {
25955 printk("%s: No scatter-gather yet.\n", card->name);
25956 - atomic_inc(&vcc->stats->tx_err);
25957 + atomic_inc_unchecked(&vcc->stats->tx_err);
25958 dev_kfree_skb(skb);
25959 return -EINVAL;
25960 }
25961 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25962
25963 err = queue_skb(card, vc, skb, oam);
25964 if (err) {
25965 - atomic_inc(&vcc->stats->tx_err);
25966 + atomic_inc_unchecked(&vcc->stats->tx_err);
25967 dev_kfree_skb(skb);
25968 return err;
25969 }
25970 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25971 skb = dev_alloc_skb(64);
25972 if (!skb) {
25973 printk("%s: Out of memory in send_oam().\n", card->name);
25974 - atomic_inc(&vcc->stats->tx_err);
25975 + atomic_inc_unchecked(&vcc->stats->tx_err);
25976 return -ENOMEM;
25977 }
25978 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25979 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25980 index 3d0c2b0..45441fa 100644
25981 --- a/drivers/atm/iphase.c
25982 +++ b/drivers/atm/iphase.c
25983 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25984 status = (u_short) (buf_desc_ptr->desc_mode);
25985 if (status & (RX_CER | RX_PTE | RX_OFL))
25986 {
25987 - atomic_inc(&vcc->stats->rx_err);
25988 + atomic_inc_unchecked(&vcc->stats->rx_err);
25989 IF_ERR(printk("IA: bad packet, dropping it");)
25990 if (status & RX_CER) {
25991 IF_ERR(printk(" cause: packet CRC error\n");)
25992 @@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
25993 len = dma_addr - buf_addr;
25994 if (len > iadev->rx_buf_sz) {
25995 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25996 - atomic_inc(&vcc->stats->rx_err);
25997 + atomic_inc_unchecked(&vcc->stats->rx_err);
25998 goto out_free_desc;
25999 }
26000
26001 @@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
26002 ia_vcc = INPH_IA_VCC(vcc);
26003 if (ia_vcc == NULL)
26004 {
26005 - atomic_inc(&vcc->stats->rx_err);
26006 + atomic_inc_unchecked(&vcc->stats->rx_err);
26007 dev_kfree_skb_any(skb);
26008 atm_return(vcc, atm_guess_pdu2truesize(len));
26009 goto INCR_DLE;
26010 @@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
26011 if ((length > iadev->rx_buf_sz) || (length >
26012 (skb->len - sizeof(struct cpcs_trailer))))
26013 {
26014 - atomic_inc(&vcc->stats->rx_err);
26015 + atomic_inc_unchecked(&vcc->stats->rx_err);
26016 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26017 length, skb->len);)
26018 dev_kfree_skb_any(skb);
26019 @@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
26020
26021 IF_RX(printk("rx_dle_intr: skb push");)
26022 vcc->push(vcc,skb);
26023 - atomic_inc(&vcc->stats->rx);
26024 + atomic_inc_unchecked(&vcc->stats->rx);
26025 iadev->rx_pkt_cnt++;
26026 }
26027 INCR_DLE:
26028 @@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
26029 {
26030 struct k_sonet_stats *stats;
26031 stats = &PRIV(_ia_dev[board])->sonet_stats;
26032 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26033 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26034 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26035 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26036 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26037 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26038 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26039 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26040 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26041 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26042 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26043 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26044 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26045 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26046 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26047 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26048 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26049 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26050 }
26051 ia_cmds.status = 0;
26052 break;
26053 @@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
26054 if ((desc == 0) || (desc > iadev->num_tx_desc))
26055 {
26056 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26057 - atomic_inc(&vcc->stats->tx);
26058 + atomic_inc_unchecked(&vcc->stats->tx);
26059 if (vcc->pop)
26060 vcc->pop(vcc, skb);
26061 else
26062 @@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
26063 ATM_DESC(skb) = vcc->vci;
26064 skb_queue_tail(&iadev->tx_dma_q, skb);
26065
26066 - atomic_inc(&vcc->stats->tx);
26067 + atomic_inc_unchecked(&vcc->stats->tx);
26068 iadev->tx_pkt_cnt++;
26069 /* Increment transaction counter */
26070 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26071
26072 #if 0
26073 /* add flow control logic */
26074 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26075 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26076 if (iavcc->vc_desc_cnt > 10) {
26077 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26078 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26079 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
26080 index f556969..0da15eb 100644
26081 --- a/drivers/atm/lanai.c
26082 +++ b/drivers/atm/lanai.c
26083 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
26084 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26085 lanai_endtx(lanai, lvcc);
26086 lanai_free_skb(lvcc->tx.atmvcc, skb);
26087 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26088 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26089 }
26090
26091 /* Try to fill the buffer - don't call unless there is backlog */
26092 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
26093 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26094 __net_timestamp(skb);
26095 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26096 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26097 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26098 out:
26099 lvcc->rx.buf.ptr = end;
26100 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26101 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26102 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26103 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26104 lanai->stats.service_rxnotaal5++;
26105 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26106 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26107 return 0;
26108 }
26109 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26110 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26111 int bytes;
26112 read_unlock(&vcc_sklist_lock);
26113 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26114 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26115 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26116 lvcc->stats.x.aal5.service_trash++;
26117 bytes = (SERVICE_GET_END(s) * 16) -
26118 (((unsigned long) lvcc->rx.buf.ptr) -
26119 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26120 }
26121 if (s & SERVICE_STREAM) {
26122 read_unlock(&vcc_sklist_lock);
26123 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26124 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26125 lvcc->stats.x.aal5.service_stream++;
26126 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26127 "PDU on VCI %d!\n", lanai->number, vci);
26128 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26129 return 0;
26130 }
26131 DPRINTK("got rx crc error on vci %d\n", vci);
26132 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26133 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26134 lvcc->stats.x.aal5.service_rxcrc++;
26135 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26136 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26137 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
26138 index 1c70c45..300718d 100644
26139 --- a/drivers/atm/nicstar.c
26140 +++ b/drivers/atm/nicstar.c
26141 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26142 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
26143 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
26144 card->index);
26145 - atomic_inc(&vcc->stats->tx_err);
26146 + atomic_inc_unchecked(&vcc->stats->tx_err);
26147 dev_kfree_skb_any(skb);
26148 return -EINVAL;
26149 }
26150 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26151 if (!vc->tx) {
26152 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
26153 card->index);
26154 - atomic_inc(&vcc->stats->tx_err);
26155 + atomic_inc_unchecked(&vcc->stats->tx_err);
26156 dev_kfree_skb_any(skb);
26157 return -EINVAL;
26158 }
26159 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26160 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
26161 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
26162 card->index);
26163 - atomic_inc(&vcc->stats->tx_err);
26164 + atomic_inc_unchecked(&vcc->stats->tx_err);
26165 dev_kfree_skb_any(skb);
26166 return -EINVAL;
26167 }
26168
26169 if (skb_shinfo(skb)->nr_frags != 0) {
26170 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26171 - atomic_inc(&vcc->stats->tx_err);
26172 + atomic_inc_unchecked(&vcc->stats->tx_err);
26173 dev_kfree_skb_any(skb);
26174 return -EINVAL;
26175 }
26176 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26177 }
26178
26179 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
26180 - atomic_inc(&vcc->stats->tx_err);
26181 + atomic_inc_unchecked(&vcc->stats->tx_err);
26182 dev_kfree_skb_any(skb);
26183 return -EIO;
26184 }
26185 - atomic_inc(&vcc->stats->tx);
26186 + atomic_inc_unchecked(&vcc->stats->tx);
26187
26188 return 0;
26189 }
26190 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26191 printk
26192 ("nicstar%d: Can't allocate buffers for aal0.\n",
26193 card->index);
26194 - atomic_add(i, &vcc->stats->rx_drop);
26195 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
26196 break;
26197 }
26198 if (!atm_charge(vcc, sb->truesize)) {
26199 RXPRINTK
26200 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
26201 card->index);
26202 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26203 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26204 dev_kfree_skb_any(sb);
26205 break;
26206 }
26207 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26208 ATM_SKB(sb)->vcc = vcc;
26209 __net_timestamp(sb);
26210 vcc->push(vcc, sb);
26211 - atomic_inc(&vcc->stats->rx);
26212 + atomic_inc_unchecked(&vcc->stats->rx);
26213 cell += ATM_CELL_PAYLOAD;
26214 }
26215
26216 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26217 if (iovb == NULL) {
26218 printk("nicstar%d: Out of iovec buffers.\n",
26219 card->index);
26220 - atomic_inc(&vcc->stats->rx_drop);
26221 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26222 recycle_rx_buf(card, skb);
26223 return;
26224 }
26225 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26226 small or large buffer itself. */
26227 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
26228 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26229 - atomic_inc(&vcc->stats->rx_err);
26230 + atomic_inc_unchecked(&vcc->stats->rx_err);
26231 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26232 NS_MAX_IOVECS);
26233 NS_PRV_IOVCNT(iovb) = 0;
26234 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26235 ("nicstar%d: Expected a small buffer, and this is not one.\n",
26236 card->index);
26237 which_list(card, skb);
26238 - atomic_inc(&vcc->stats->rx_err);
26239 + atomic_inc_unchecked(&vcc->stats->rx_err);
26240 recycle_rx_buf(card, skb);
26241 vc->rx_iov = NULL;
26242 recycle_iov_buf(card, iovb);
26243 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26244 ("nicstar%d: Expected a large buffer, and this is not one.\n",
26245 card->index);
26246 which_list(card, skb);
26247 - atomic_inc(&vcc->stats->rx_err);
26248 + atomic_inc_unchecked(&vcc->stats->rx_err);
26249 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26250 NS_PRV_IOVCNT(iovb));
26251 vc->rx_iov = NULL;
26252 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26253 printk(" - PDU size mismatch.\n");
26254 else
26255 printk(".\n");
26256 - atomic_inc(&vcc->stats->rx_err);
26257 + atomic_inc_unchecked(&vcc->stats->rx_err);
26258 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26259 NS_PRV_IOVCNT(iovb));
26260 vc->rx_iov = NULL;
26261 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26262 /* skb points to a small buffer */
26263 if (!atm_charge(vcc, skb->truesize)) {
26264 push_rxbufs(card, skb);
26265 - atomic_inc(&vcc->stats->rx_drop);
26266 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26267 } else {
26268 skb_put(skb, len);
26269 dequeue_sm_buf(card, skb);
26270 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26271 ATM_SKB(skb)->vcc = vcc;
26272 __net_timestamp(skb);
26273 vcc->push(vcc, skb);
26274 - atomic_inc(&vcc->stats->rx);
26275 + atomic_inc_unchecked(&vcc->stats->rx);
26276 }
26277 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
26278 struct sk_buff *sb;
26279 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26280 if (len <= NS_SMBUFSIZE) {
26281 if (!atm_charge(vcc, sb->truesize)) {
26282 push_rxbufs(card, sb);
26283 - atomic_inc(&vcc->stats->rx_drop);
26284 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26285 } else {
26286 skb_put(sb, len);
26287 dequeue_sm_buf(card, sb);
26288 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26289 ATM_SKB(sb)->vcc = vcc;
26290 __net_timestamp(sb);
26291 vcc->push(vcc, sb);
26292 - atomic_inc(&vcc->stats->rx);
26293 + atomic_inc_unchecked(&vcc->stats->rx);
26294 }
26295
26296 push_rxbufs(card, skb);
26297 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26298
26299 if (!atm_charge(vcc, skb->truesize)) {
26300 push_rxbufs(card, skb);
26301 - atomic_inc(&vcc->stats->rx_drop);
26302 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26303 } else {
26304 dequeue_lg_buf(card, skb);
26305 #ifdef NS_USE_DESTRUCTORS
26306 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26307 ATM_SKB(skb)->vcc = vcc;
26308 __net_timestamp(skb);
26309 vcc->push(vcc, skb);
26310 - atomic_inc(&vcc->stats->rx);
26311 + atomic_inc_unchecked(&vcc->stats->rx);
26312 }
26313
26314 push_rxbufs(card, sb);
26315 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26316 printk
26317 ("nicstar%d: Out of huge buffers.\n",
26318 card->index);
26319 - atomic_inc(&vcc->stats->rx_drop);
26320 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26321 recycle_iovec_rx_bufs(card,
26322 (struct iovec *)
26323 iovb->data,
26324 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26325 card->hbpool.count++;
26326 } else
26327 dev_kfree_skb_any(hb);
26328 - atomic_inc(&vcc->stats->rx_drop);
26329 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26330 } else {
26331 /* Copy the small buffer to the huge buffer */
26332 sb = (struct sk_buff *)iov->iov_base;
26333 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26334 #endif /* NS_USE_DESTRUCTORS */
26335 __net_timestamp(hb);
26336 vcc->push(vcc, hb);
26337 - atomic_inc(&vcc->stats->rx);
26338 + atomic_inc_unchecked(&vcc->stats->rx);
26339 }
26340 }
26341
26342 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26343 index 5d1d076..12fbca4 100644
26344 --- a/drivers/atm/solos-pci.c
26345 +++ b/drivers/atm/solos-pci.c
26346 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26347 }
26348 atm_charge(vcc, skb->truesize);
26349 vcc->push(vcc, skb);
26350 - atomic_inc(&vcc->stats->rx);
26351 + atomic_inc_unchecked(&vcc->stats->rx);
26352 break;
26353
26354 case PKT_STATUS:
26355 @@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26356 vcc = SKB_CB(oldskb)->vcc;
26357
26358 if (vcc) {
26359 - atomic_inc(&vcc->stats->tx);
26360 + atomic_inc_unchecked(&vcc->stats->tx);
26361 solos_pop(vcc, oldskb);
26362 } else
26363 dev_kfree_skb_irq(oldskb);
26364 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26365 index 90f1ccc..04c4a1e 100644
26366 --- a/drivers/atm/suni.c
26367 +++ b/drivers/atm/suni.c
26368 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26369
26370
26371 #define ADD_LIMITED(s,v) \
26372 - atomic_add((v),&stats->s); \
26373 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26374 + atomic_add_unchecked((v),&stats->s); \
26375 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26376
26377
26378 static void suni_hz(unsigned long from_timer)
26379 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26380 index 5120a96..e2572bd 100644
26381 --- a/drivers/atm/uPD98402.c
26382 +++ b/drivers/atm/uPD98402.c
26383 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26384 struct sonet_stats tmp;
26385 int error = 0;
26386
26387 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26388 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26389 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26390 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26391 if (zero && !error) {
26392 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26393
26394
26395 #define ADD_LIMITED(s,v) \
26396 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26397 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26398 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26399 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26400 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26401 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26402
26403
26404 static void stat_event(struct atm_dev *dev)
26405 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26406 if (reason & uPD98402_INT_PFM) stat_event(dev);
26407 if (reason & uPD98402_INT_PCO) {
26408 (void) GET(PCOCR); /* clear interrupt cause */
26409 - atomic_add(GET(HECCT),
26410 + atomic_add_unchecked(GET(HECCT),
26411 &PRIV(dev)->sonet_stats.uncorr_hcs);
26412 }
26413 if ((reason & uPD98402_INT_RFO) &&
26414 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26415 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26416 uPD98402_INT_LOS),PIMR); /* enable them */
26417 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26418 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26419 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26420 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26421 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26422 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26423 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26424 return 0;
26425 }
26426
26427 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26428 index d889f56..17eb71e 100644
26429 --- a/drivers/atm/zatm.c
26430 +++ b/drivers/atm/zatm.c
26431 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26432 }
26433 if (!size) {
26434 dev_kfree_skb_irq(skb);
26435 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26436 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26437 continue;
26438 }
26439 if (!atm_charge(vcc,skb->truesize)) {
26440 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26441 skb->len = size;
26442 ATM_SKB(skb)->vcc = vcc;
26443 vcc->push(vcc,skb);
26444 - atomic_inc(&vcc->stats->rx);
26445 + atomic_inc_unchecked(&vcc->stats->rx);
26446 }
26447 zout(pos & 0xffff,MTA(mbx));
26448 #if 0 /* probably a stupid idea */
26449 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26450 skb_queue_head(&zatm_vcc->backlog,skb);
26451 break;
26452 }
26453 - atomic_inc(&vcc->stats->tx);
26454 + atomic_inc_unchecked(&vcc->stats->tx);
26455 wake_up(&zatm_vcc->tx_wait);
26456 }
26457
26458 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26459 index a4760e0..51283cf 100644
26460 --- a/drivers/base/devtmpfs.c
26461 +++ b/drivers/base/devtmpfs.c
26462 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26463 if (!thread)
26464 return 0;
26465
26466 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26467 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26468 if (err)
26469 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26470 else
26471 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26472 index caf995f..6f76697 100644
26473 --- a/drivers/base/power/wakeup.c
26474 +++ b/drivers/base/power/wakeup.c
26475 @@ -30,14 +30,14 @@ bool events_check_enabled;
26476 * They need to be modified together atomically, so it's better to use one
26477 * atomic variable to hold them both.
26478 */
26479 -static atomic_t combined_event_count = ATOMIC_INIT(0);
26480 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26481
26482 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26483 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26484
26485 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26486 {
26487 - unsigned int comb = atomic_read(&combined_event_count);
26488 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
26489
26490 *cnt = (comb >> IN_PROGRESS_BITS);
26491 *inpr = comb & MAX_IN_PROGRESS;
26492 @@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26493 ws->last_time = ktime_get();
26494
26495 /* Increment the counter of events in progress. */
26496 - atomic_inc(&combined_event_count);
26497 + atomic_inc_unchecked(&combined_event_count);
26498 }
26499
26500 /**
26501 @@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26502 * Increment the counter of registered wakeup events and decrement the
26503 * couter of wakeup events in progress simultaneously.
26504 */
26505 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26506 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26507 }
26508
26509 /**
26510 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26511 index b0f553b..77b928b 100644
26512 --- a/drivers/block/cciss.c
26513 +++ b/drivers/block/cciss.c
26514 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26515 int err;
26516 u32 cp;
26517
26518 + memset(&arg64, 0, sizeof(arg64));
26519 +
26520 err = 0;
26521 err |=
26522 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26523 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
26524 while (!list_empty(&h->reqQ)) {
26525 c = list_entry(h->reqQ.next, CommandList_struct, list);
26526 /* can't do anything if fifo is full */
26527 - if ((h->access.fifo_full(h))) {
26528 + if ((h->access->fifo_full(h))) {
26529 dev_warn(&h->pdev->dev, "fifo full\n");
26530 break;
26531 }
26532 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
26533 h->Qdepth--;
26534
26535 /* Tell the controller execute command */
26536 - h->access.submit_command(h, c);
26537 + h->access->submit_command(h, c);
26538
26539 /* Put job onto the completed Q */
26540 addQ(&h->cmpQ, c);
26541 @@ -3443,17 +3445,17 @@ startio:
26542
26543 static inline unsigned long get_next_completion(ctlr_info_t *h)
26544 {
26545 - return h->access.command_completed(h);
26546 + return h->access->command_completed(h);
26547 }
26548
26549 static inline int interrupt_pending(ctlr_info_t *h)
26550 {
26551 - return h->access.intr_pending(h);
26552 + return h->access->intr_pending(h);
26553 }
26554
26555 static inline long interrupt_not_for_us(ctlr_info_t *h)
26556 {
26557 - return ((h->access.intr_pending(h) == 0) ||
26558 + return ((h->access->intr_pending(h) == 0) ||
26559 (h->interrupts_enabled == 0));
26560 }
26561
26562 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
26563 u32 a;
26564
26565 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26566 - return h->access.command_completed(h);
26567 + return h->access->command_completed(h);
26568
26569 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26570 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26571 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26572 trans_support & CFGTBL_Trans_use_short_tags);
26573
26574 /* Change the access methods to the performant access methods */
26575 - h->access = SA5_performant_access;
26576 + h->access = &SA5_performant_access;
26577 h->transMethod = CFGTBL_Trans_Performant;
26578
26579 return;
26580 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26581 if (prod_index < 0)
26582 return -ENODEV;
26583 h->product_name = products[prod_index].product_name;
26584 - h->access = *(products[prod_index].access);
26585 + h->access = products[prod_index].access;
26586
26587 if (cciss_board_disabled(h)) {
26588 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26589 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
26590 }
26591
26592 /* make sure the board interrupts are off */
26593 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26594 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26595 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26596 if (rc)
26597 goto clean2;
26598 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
26599 * fake ones to scoop up any residual completions.
26600 */
26601 spin_lock_irqsave(&h->lock, flags);
26602 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26603 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26604 spin_unlock_irqrestore(&h->lock, flags);
26605 free_irq(h->intr[h->intr_mode], h);
26606 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26607 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
26608 dev_info(&h->pdev->dev, "Board READY.\n");
26609 dev_info(&h->pdev->dev,
26610 "Waiting for stale completions to drain.\n");
26611 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26612 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26613 msleep(10000);
26614 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26615 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26616
26617 rc = controller_reset_failed(h->cfgtable);
26618 if (rc)
26619 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
26620 cciss_scsi_setup(h);
26621
26622 /* Turn the interrupts on so we can service requests */
26623 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26624 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26625
26626 /* Get the firmware version */
26627 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26628 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26629 kfree(flush_buf);
26630 if (return_code != IO_OK)
26631 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26632 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26633 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26634 free_irq(h->intr[h->intr_mode], h);
26635 }
26636
26637 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26638 index 7fda30e..eb5dfe0 100644
26639 --- a/drivers/block/cciss.h
26640 +++ b/drivers/block/cciss.h
26641 @@ -101,7 +101,7 @@ struct ctlr_info
26642 /* information about each logical volume */
26643 drive_info_struct *drv[CISS_MAX_LUN];
26644
26645 - struct access_method access;
26646 + struct access_method *access;
26647
26648 /* queue and queue Info */
26649 struct list_head reqQ;
26650 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26651 index 9125bbe..eede5c8 100644
26652 --- a/drivers/block/cpqarray.c
26653 +++ b/drivers/block/cpqarray.c
26654 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26655 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26656 goto Enomem4;
26657 }
26658 - hba[i]->access.set_intr_mask(hba[i], 0);
26659 + hba[i]->access->set_intr_mask(hba[i], 0);
26660 if (request_irq(hba[i]->intr, do_ida_intr,
26661 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26662 {
26663 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26664 add_timer(&hba[i]->timer);
26665
26666 /* Enable IRQ now that spinlock and rate limit timer are set up */
26667 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26668 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26669
26670 for(j=0; j<NWD; j++) {
26671 struct gendisk *disk = ida_gendisk[i][j];
26672 @@ -694,7 +694,7 @@ DBGINFO(
26673 for(i=0; i<NR_PRODUCTS; i++) {
26674 if (board_id == products[i].board_id) {
26675 c->product_name = products[i].product_name;
26676 - c->access = *(products[i].access);
26677 + c->access = products[i].access;
26678 break;
26679 }
26680 }
26681 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26682 hba[ctlr]->intr = intr;
26683 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26684 hba[ctlr]->product_name = products[j].product_name;
26685 - hba[ctlr]->access = *(products[j].access);
26686 + hba[ctlr]->access = products[j].access;
26687 hba[ctlr]->ctlr = ctlr;
26688 hba[ctlr]->board_id = board_id;
26689 hba[ctlr]->pci_dev = NULL; /* not PCI */
26690 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
26691
26692 while((c = h->reqQ) != NULL) {
26693 /* Can't do anything if we're busy */
26694 - if (h->access.fifo_full(h) == 0)
26695 + if (h->access->fifo_full(h) == 0)
26696 return;
26697
26698 /* Get the first entry from the request Q */
26699 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
26700 h->Qdepth--;
26701
26702 /* Tell the controller to do our bidding */
26703 - h->access.submit_command(h, c);
26704 + h->access->submit_command(h, c);
26705
26706 /* Get onto the completion Q */
26707 addQ(&h->cmpQ, c);
26708 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26709 unsigned long flags;
26710 __u32 a,a1;
26711
26712 - istat = h->access.intr_pending(h);
26713 + istat = h->access->intr_pending(h);
26714 /* Is this interrupt for us? */
26715 if (istat == 0)
26716 return IRQ_NONE;
26717 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26718 */
26719 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26720 if (istat & FIFO_NOT_EMPTY) {
26721 - while((a = h->access.command_completed(h))) {
26722 + while((a = h->access->command_completed(h))) {
26723 a1 = a; a &= ~3;
26724 if ((c = h->cmpQ) == NULL)
26725 {
26726 @@ -1449,11 +1449,11 @@ static int sendcmd(
26727 /*
26728 * Disable interrupt
26729 */
26730 - info_p->access.set_intr_mask(info_p, 0);
26731 + info_p->access->set_intr_mask(info_p, 0);
26732 /* Make sure there is room in the command FIFO */
26733 /* Actually it should be completely empty at this time. */
26734 for (i = 200000; i > 0; i--) {
26735 - temp = info_p->access.fifo_full(info_p);
26736 + temp = info_p->access->fifo_full(info_p);
26737 if (temp != 0) {
26738 break;
26739 }
26740 @@ -1466,7 +1466,7 @@ DBG(
26741 /*
26742 * Send the cmd
26743 */
26744 - info_p->access.submit_command(info_p, c);
26745 + info_p->access->submit_command(info_p, c);
26746 complete = pollcomplete(ctlr);
26747
26748 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26749 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26750 * we check the new geometry. Then turn interrupts back on when
26751 * we're done.
26752 */
26753 - host->access.set_intr_mask(host, 0);
26754 + host->access->set_intr_mask(host, 0);
26755 getgeometry(ctlr);
26756 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26757 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26758
26759 for(i=0; i<NWD; i++) {
26760 struct gendisk *disk = ida_gendisk[ctlr][i];
26761 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
26762 /* Wait (up to 2 seconds) for a command to complete */
26763
26764 for (i = 200000; i > 0; i--) {
26765 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
26766 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
26767 if (done == 0) {
26768 udelay(10); /* a short fixed delay */
26769 } else
26770 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26771 index be73e9d..7fbf140 100644
26772 --- a/drivers/block/cpqarray.h
26773 +++ b/drivers/block/cpqarray.h
26774 @@ -99,7 +99,7 @@ struct ctlr_info {
26775 drv_info_t drv[NWD];
26776 struct proc_dir_entry *proc;
26777
26778 - struct access_method access;
26779 + struct access_method *access;
26780
26781 cmdlist_t *reqQ;
26782 cmdlist_t *cmpQ;
26783 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26784 index 9cf2035..bffca95 100644
26785 --- a/drivers/block/drbd/drbd_int.h
26786 +++ b/drivers/block/drbd/drbd_int.h
26787 @@ -736,7 +736,7 @@ struct drbd_request;
26788 struct drbd_epoch {
26789 struct list_head list;
26790 unsigned int barrier_nr;
26791 - atomic_t epoch_size; /* increased on every request added. */
26792 + atomic_unchecked_t epoch_size; /* increased on every request added. */
26793 atomic_t active; /* increased on every req. added, and dec on every finished. */
26794 unsigned long flags;
26795 };
26796 @@ -1108,7 +1108,7 @@ struct drbd_conf {
26797 void *int_dig_in;
26798 void *int_dig_vv;
26799 wait_queue_head_t seq_wait;
26800 - atomic_t packet_seq;
26801 + atomic_unchecked_t packet_seq;
26802 unsigned int peer_seq;
26803 spinlock_t peer_seq_lock;
26804 unsigned int minor;
26805 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26806
26807 static inline void drbd_tcp_cork(struct socket *sock)
26808 {
26809 - int __user val = 1;
26810 + int val = 1;
26811 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26812 - (char __user *)&val, sizeof(val));
26813 + (char __force_user *)&val, sizeof(val));
26814 }
26815
26816 static inline void drbd_tcp_uncork(struct socket *sock)
26817 {
26818 - int __user val = 0;
26819 + int val = 0;
26820 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26821 - (char __user *)&val, sizeof(val));
26822 + (char __force_user *)&val, sizeof(val));
26823 }
26824
26825 static inline void drbd_tcp_nodelay(struct socket *sock)
26826 {
26827 - int __user val = 1;
26828 + int val = 1;
26829 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26830 - (char __user *)&val, sizeof(val));
26831 + (char __force_user *)&val, sizeof(val));
26832 }
26833
26834 static inline void drbd_tcp_quickack(struct socket *sock)
26835 {
26836 - int __user val = 2;
26837 + int val = 2;
26838 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26839 - (char __user *)&val, sizeof(val));
26840 + (char __force_user *)&val, sizeof(val));
26841 }
26842
26843 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26844 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26845 index 0358e55..bc33689 100644
26846 --- a/drivers/block/drbd/drbd_main.c
26847 +++ b/drivers/block/drbd/drbd_main.c
26848 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26849 p.sector = sector;
26850 p.block_id = block_id;
26851 p.blksize = blksize;
26852 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26853 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26854
26855 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26856 return false;
26857 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26858 p.sector = cpu_to_be64(req->sector);
26859 p.block_id = (unsigned long)req;
26860 p.seq_num = cpu_to_be32(req->seq_num =
26861 - atomic_add_return(1, &mdev->packet_seq));
26862 + atomic_add_return_unchecked(1, &mdev->packet_seq));
26863
26864 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26865
26866 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26867 atomic_set(&mdev->unacked_cnt, 0);
26868 atomic_set(&mdev->local_cnt, 0);
26869 atomic_set(&mdev->net_cnt, 0);
26870 - atomic_set(&mdev->packet_seq, 0);
26871 + atomic_set_unchecked(&mdev->packet_seq, 0);
26872 atomic_set(&mdev->pp_in_use, 0);
26873 atomic_set(&mdev->pp_in_use_by_net, 0);
26874 atomic_set(&mdev->rs_sect_in, 0);
26875 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26876 mdev->receiver.t_state);
26877
26878 /* no need to lock it, I'm the only thread alive */
26879 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26880 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26881 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26882 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26883 mdev->al_writ_cnt =
26884 mdev->bm_writ_cnt =
26885 mdev->read_cnt =
26886 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26887 index af2a250..219c74b 100644
26888 --- a/drivers/block/drbd/drbd_nl.c
26889 +++ b/drivers/block/drbd/drbd_nl.c
26890 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26891 module_put(THIS_MODULE);
26892 }
26893
26894 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26895 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26896
26897 static unsigned short *
26898 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26899 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26900 cn_reply->id.idx = CN_IDX_DRBD;
26901 cn_reply->id.val = CN_VAL_DRBD;
26902
26903 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26904 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26905 cn_reply->ack = 0; /* not used here. */
26906 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26907 (int)((char *)tl - (char *)reply->tag_list);
26908 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26909 cn_reply->id.idx = CN_IDX_DRBD;
26910 cn_reply->id.val = CN_VAL_DRBD;
26911
26912 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26913 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26914 cn_reply->ack = 0; /* not used here. */
26915 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26916 (int)((char *)tl - (char *)reply->tag_list);
26917 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26918 cn_reply->id.idx = CN_IDX_DRBD;
26919 cn_reply->id.val = CN_VAL_DRBD;
26920
26921 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26922 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26923 cn_reply->ack = 0; // not used here.
26924 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26925 (int)((char*)tl - (char*)reply->tag_list);
26926 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26927 cn_reply->id.idx = CN_IDX_DRBD;
26928 cn_reply->id.val = CN_VAL_DRBD;
26929
26930 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26931 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26932 cn_reply->ack = 0; /* not used here. */
26933 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26934 (int)((char *)tl - (char *)reply->tag_list);
26935 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26936 index 43beaca..4a5b1dd 100644
26937 --- a/drivers/block/drbd/drbd_receiver.c
26938 +++ b/drivers/block/drbd/drbd_receiver.c
26939 @@ -894,7 +894,7 @@ retry:
26940 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26941 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26942
26943 - atomic_set(&mdev->packet_seq, 0);
26944 + atomic_set_unchecked(&mdev->packet_seq, 0);
26945 mdev->peer_seq = 0;
26946
26947 drbd_thread_start(&mdev->asender);
26948 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26949 do {
26950 next_epoch = NULL;
26951
26952 - epoch_size = atomic_read(&epoch->epoch_size);
26953 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26954
26955 switch (ev & ~EV_CLEANUP) {
26956 case EV_PUT:
26957 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26958 rv = FE_DESTROYED;
26959 } else {
26960 epoch->flags = 0;
26961 - atomic_set(&epoch->epoch_size, 0);
26962 + atomic_set_unchecked(&epoch->epoch_size, 0);
26963 /* atomic_set(&epoch->active, 0); is already zero */
26964 if (rv == FE_STILL_LIVE)
26965 rv = FE_RECYCLED;
26966 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26967 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26968 drbd_flush(mdev);
26969
26970 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26971 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26972 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26973 if (epoch)
26974 break;
26975 }
26976
26977 epoch = mdev->current_epoch;
26978 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26979 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26980
26981 D_ASSERT(atomic_read(&epoch->active) == 0);
26982 D_ASSERT(epoch->flags == 0);
26983 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26984 }
26985
26986 epoch->flags = 0;
26987 - atomic_set(&epoch->epoch_size, 0);
26988 + atomic_set_unchecked(&epoch->epoch_size, 0);
26989 atomic_set(&epoch->active, 0);
26990
26991 spin_lock(&mdev->epoch_lock);
26992 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26993 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26994 list_add(&epoch->list, &mdev->current_epoch->list);
26995 mdev->current_epoch = epoch;
26996 mdev->epochs++;
26997 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26998 spin_unlock(&mdev->peer_seq_lock);
26999
27000 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
27001 - atomic_inc(&mdev->current_epoch->epoch_size);
27002 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
27003 return drbd_drain_block(mdev, data_size);
27004 }
27005
27006 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
27007
27008 spin_lock(&mdev->epoch_lock);
27009 e->epoch = mdev->current_epoch;
27010 - atomic_inc(&e->epoch->epoch_size);
27011 + atomic_inc_unchecked(&e->epoch->epoch_size);
27012 atomic_inc(&e->epoch->active);
27013 spin_unlock(&mdev->epoch_lock);
27014
27015 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
27016 D_ASSERT(list_empty(&mdev->done_ee));
27017
27018 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
27019 - atomic_set(&mdev->current_epoch->epoch_size, 0);
27020 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
27021 D_ASSERT(list_empty(&mdev->current_epoch->list));
27022 }
27023
27024 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
27025 index 1e888c9..05cf1b0 100644
27026 --- a/drivers/block/loop.c
27027 +++ b/drivers/block/loop.c
27028 @@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
27029 mm_segment_t old_fs = get_fs();
27030
27031 set_fs(get_ds());
27032 - bw = file->f_op->write(file, buf, len, &pos);
27033 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
27034 set_fs(old_fs);
27035 if (likely(bw == len))
27036 return 0;
27037 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
27038 index 4364303..9adf4ee 100644
27039 --- a/drivers/char/Kconfig
27040 +++ b/drivers/char/Kconfig
27041 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
27042
27043 config DEVKMEM
27044 bool "/dev/kmem virtual device support"
27045 - default y
27046 + default n
27047 + depends on !GRKERNSEC_KMEM
27048 help
27049 Say Y here if you want to support the /dev/kmem device. The
27050 /dev/kmem device is rarely used, but can be used for certain
27051 @@ -596,6 +597,7 @@ config DEVPORT
27052 bool
27053 depends on !M68K
27054 depends on ISA || PCI
27055 + depends on !GRKERNSEC_KMEM
27056 default y
27057
27058 source "drivers/s390/char/Kconfig"
27059 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
27060 index 2e04433..22afc64 100644
27061 --- a/drivers/char/agp/frontend.c
27062 +++ b/drivers/char/agp/frontend.c
27063 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
27064 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27065 return -EFAULT;
27066
27067 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27068 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27069 return -EFAULT;
27070
27071 client = agp_find_client_by_pid(reserve.pid);
27072 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
27073 index 095ab90..afad0a4 100644
27074 --- a/drivers/char/briq_panel.c
27075 +++ b/drivers/char/briq_panel.c
27076 @@ -9,6 +9,7 @@
27077 #include <linux/types.h>
27078 #include <linux/errno.h>
27079 #include <linux/tty.h>
27080 +#include <linux/mutex.h>
27081 #include <linux/timer.h>
27082 #include <linux/kernel.h>
27083 #include <linux/wait.h>
27084 @@ -34,6 +35,7 @@ static int vfd_is_open;
27085 static unsigned char vfd[40];
27086 static int vfd_cursor;
27087 static unsigned char ledpb, led;
27088 +static DEFINE_MUTEX(vfd_mutex);
27089
27090 static void update_vfd(void)
27091 {
27092 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
27093 if (!vfd_is_open)
27094 return -EBUSY;
27095
27096 + mutex_lock(&vfd_mutex);
27097 for (;;) {
27098 char c;
27099 if (!indx)
27100 break;
27101 - if (get_user(c, buf))
27102 + if (get_user(c, buf)) {
27103 + mutex_unlock(&vfd_mutex);
27104 return -EFAULT;
27105 + }
27106 if (esc) {
27107 set_led(c);
27108 esc = 0;
27109 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
27110 buf++;
27111 }
27112 update_vfd();
27113 + mutex_unlock(&vfd_mutex);
27114
27115 return len;
27116 }
27117 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
27118 index f773a9d..65cd683 100644
27119 --- a/drivers/char/genrtc.c
27120 +++ b/drivers/char/genrtc.c
27121 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
27122 switch (cmd) {
27123
27124 case RTC_PLL_GET:
27125 + memset(&pll, 0, sizeof(pll));
27126 if (get_rtc_pll(&pll))
27127 return -EINVAL;
27128 else
27129 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
27130 index 0833896..cccce52 100644
27131 --- a/drivers/char/hpet.c
27132 +++ b/drivers/char/hpet.c
27133 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
27134 }
27135
27136 static int
27137 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
27138 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
27139 struct hpet_info *info)
27140 {
27141 struct hpet_timer __iomem *timer;
27142 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
27143 index 58c0e63..46c16bf 100644
27144 --- a/drivers/char/ipmi/ipmi_msghandler.c
27145 +++ b/drivers/char/ipmi/ipmi_msghandler.c
27146 @@ -415,7 +415,7 @@ struct ipmi_smi {
27147 struct proc_dir_entry *proc_dir;
27148 char proc_dir_name[10];
27149
27150 - atomic_t stats[IPMI_NUM_STATS];
27151 + atomic_unchecked_t stats[IPMI_NUM_STATS];
27152
27153 /*
27154 * run_to_completion duplicate of smb_info, smi_info
27155 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27156
27157
27158 #define ipmi_inc_stat(intf, stat) \
27159 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27160 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27161 #define ipmi_get_stat(intf, stat) \
27162 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27163 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27164
27165 static int is_lan_addr(struct ipmi_addr *addr)
27166 {
27167 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
27168 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27169 init_waitqueue_head(&intf->waitq);
27170 for (i = 0; i < IPMI_NUM_STATS; i++)
27171 - atomic_set(&intf->stats[i], 0);
27172 + atomic_set_unchecked(&intf->stats[i], 0);
27173
27174 intf->proc_dir = NULL;
27175
27176 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
27177 index 9397ab4..d01bee1 100644
27178 --- a/drivers/char/ipmi/ipmi_si_intf.c
27179 +++ b/drivers/char/ipmi/ipmi_si_intf.c
27180 @@ -277,7 +277,7 @@ struct smi_info {
27181 unsigned char slave_addr;
27182
27183 /* Counters and things for the proc filesystem. */
27184 - atomic_t stats[SI_NUM_STATS];
27185 + atomic_unchecked_t stats[SI_NUM_STATS];
27186
27187 struct task_struct *thread;
27188
27189 @@ -286,9 +286,9 @@ struct smi_info {
27190 };
27191
27192 #define smi_inc_stat(smi, stat) \
27193 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27194 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27195 #define smi_get_stat(smi, stat) \
27196 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27197 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27198
27199 #define SI_MAX_PARMS 4
27200
27201 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
27202 atomic_set(&new_smi->req_events, 0);
27203 new_smi->run_to_completion = 0;
27204 for (i = 0; i < SI_NUM_STATS; i++)
27205 - atomic_set(&new_smi->stats[i], 0);
27206 + atomic_set_unchecked(&new_smi->stats[i], 0);
27207
27208 new_smi->interrupt_disabled = 1;
27209 atomic_set(&new_smi->stop_operation, 0);
27210 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
27211 index 1aeaaba..e018570 100644
27212 --- a/drivers/char/mbcs.c
27213 +++ b/drivers/char/mbcs.c
27214 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
27215 return 0;
27216 }
27217
27218 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
27219 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
27220 {
27221 .part_num = MBCS_PART_NUM,
27222 .mfg_num = MBCS_MFG_NUM,
27223 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
27224 index 1451790..f705c30 100644
27225 --- a/drivers/char/mem.c
27226 +++ b/drivers/char/mem.c
27227 @@ -18,6 +18,7 @@
27228 #include <linux/raw.h>
27229 #include <linux/tty.h>
27230 #include <linux/capability.h>
27231 +#include <linux/security.h>
27232 #include <linux/ptrace.h>
27233 #include <linux/device.h>
27234 #include <linux/highmem.h>
27235 @@ -35,6 +36,10 @@
27236 # include <linux/efi.h>
27237 #endif
27238
27239 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27240 +extern const struct file_operations grsec_fops;
27241 +#endif
27242 +
27243 static inline unsigned long size_inside_page(unsigned long start,
27244 unsigned long size)
27245 {
27246 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27247
27248 while (cursor < to) {
27249 if (!devmem_is_allowed(pfn)) {
27250 +#ifdef CONFIG_GRKERNSEC_KMEM
27251 + gr_handle_mem_readwrite(from, to);
27252 +#else
27253 printk(KERN_INFO
27254 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27255 current->comm, from, to);
27256 +#endif
27257 return 0;
27258 }
27259 cursor += PAGE_SIZE;
27260 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27261 }
27262 return 1;
27263 }
27264 +#elif defined(CONFIG_GRKERNSEC_KMEM)
27265 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27266 +{
27267 + return 0;
27268 +}
27269 #else
27270 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27271 {
27272 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27273
27274 while (count > 0) {
27275 unsigned long remaining;
27276 + char *temp;
27277
27278 sz = size_inside_page(p, count);
27279
27280 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27281 if (!ptr)
27282 return -EFAULT;
27283
27284 - remaining = copy_to_user(buf, ptr, sz);
27285 +#ifdef CONFIG_PAX_USERCOPY
27286 + temp = kmalloc(sz, GFP_KERNEL);
27287 + if (!temp) {
27288 + unxlate_dev_mem_ptr(p, ptr);
27289 + return -ENOMEM;
27290 + }
27291 + memcpy(temp, ptr, sz);
27292 +#else
27293 + temp = ptr;
27294 +#endif
27295 +
27296 + remaining = copy_to_user(buf, temp, sz);
27297 +
27298 +#ifdef CONFIG_PAX_USERCOPY
27299 + kfree(temp);
27300 +#endif
27301 +
27302 unxlate_dev_mem_ptr(p, ptr);
27303 if (remaining)
27304 return -EFAULT;
27305 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27306 size_t count, loff_t *ppos)
27307 {
27308 unsigned long p = *ppos;
27309 - ssize_t low_count, read, sz;
27310 + ssize_t low_count, read, sz, err = 0;
27311 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27312 - int err = 0;
27313
27314 read = 0;
27315 if (p < (unsigned long) high_memory) {
27316 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27317 }
27318 #endif
27319 while (low_count > 0) {
27320 + char *temp;
27321 +
27322 sz = size_inside_page(p, low_count);
27323
27324 /*
27325 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27326 */
27327 kbuf = xlate_dev_kmem_ptr((char *)p);
27328
27329 - if (copy_to_user(buf, kbuf, sz))
27330 +#ifdef CONFIG_PAX_USERCOPY
27331 + temp = kmalloc(sz, GFP_KERNEL);
27332 + if (!temp)
27333 + return -ENOMEM;
27334 + memcpy(temp, kbuf, sz);
27335 +#else
27336 + temp = kbuf;
27337 +#endif
27338 +
27339 + err = copy_to_user(buf, temp, sz);
27340 +
27341 +#ifdef CONFIG_PAX_USERCOPY
27342 + kfree(temp);
27343 +#endif
27344 +
27345 + if (err)
27346 return -EFAULT;
27347 buf += sz;
27348 p += sz;
27349 @@ -867,6 +914,9 @@ static const struct memdev {
27350 #ifdef CONFIG_CRASH_DUMP
27351 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27352 #endif
27353 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27354 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27355 +#endif
27356 };
27357
27358 static int memory_open(struct inode *inode, struct file *filp)
27359 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27360 index da3cfee..a5a6606 100644
27361 --- a/drivers/char/nvram.c
27362 +++ b/drivers/char/nvram.c
27363 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27364
27365 spin_unlock_irq(&rtc_lock);
27366
27367 - if (copy_to_user(buf, contents, tmp - contents))
27368 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27369 return -EFAULT;
27370
27371 *ppos = i;
27372 diff --git a/drivers/char/random.c b/drivers/char/random.c
27373 index 6035ab8..bdfe4fd 100644
27374 --- a/drivers/char/random.c
27375 +++ b/drivers/char/random.c
27376 @@ -261,8 +261,13 @@
27377 /*
27378 * Configuration information
27379 */
27380 +#ifdef CONFIG_GRKERNSEC_RANDNET
27381 +#define INPUT_POOL_WORDS 512
27382 +#define OUTPUT_POOL_WORDS 128
27383 +#else
27384 #define INPUT_POOL_WORDS 128
27385 #define OUTPUT_POOL_WORDS 32
27386 +#endif
27387 #define SEC_XFER_SIZE 512
27388 #define EXTRACT_SIZE 10
27389
27390 @@ -300,10 +305,17 @@ static struct poolinfo {
27391 int poolwords;
27392 int tap1, tap2, tap3, tap4, tap5;
27393 } poolinfo_table[] = {
27394 +#ifdef CONFIG_GRKERNSEC_RANDNET
27395 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27396 + { 512, 411, 308, 208, 104, 1 },
27397 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27398 + { 128, 103, 76, 51, 25, 1 },
27399 +#else
27400 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27401 { 128, 103, 76, 51, 25, 1 },
27402 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27403 { 32, 26, 20, 14, 7, 1 },
27404 +#endif
27405 #if 0
27406 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27407 { 2048, 1638, 1231, 819, 411, 1 },
27408 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27409
27410 extract_buf(r, tmp);
27411 i = min_t(int, nbytes, EXTRACT_SIZE);
27412 - if (copy_to_user(buf, tmp, i)) {
27413 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27414 ret = -EFAULT;
27415 break;
27416 }
27417 @@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27418 #include <linux/sysctl.h>
27419
27420 static int min_read_thresh = 8, min_write_thresh;
27421 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
27422 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27423 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27424 static char sysctl_bootid[16];
27425
27426 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27427 index 1ee8ce7..b778bef 100644
27428 --- a/drivers/char/sonypi.c
27429 +++ b/drivers/char/sonypi.c
27430 @@ -55,6 +55,7 @@
27431 #include <asm/uaccess.h>
27432 #include <asm/io.h>
27433 #include <asm/system.h>
27434 +#include <asm/local.h>
27435
27436 #include <linux/sonypi.h>
27437
27438 @@ -491,7 +492,7 @@ static struct sonypi_device {
27439 spinlock_t fifo_lock;
27440 wait_queue_head_t fifo_proc_list;
27441 struct fasync_struct *fifo_async;
27442 - int open_count;
27443 + local_t open_count;
27444 int model;
27445 struct input_dev *input_jog_dev;
27446 struct input_dev *input_key_dev;
27447 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27448 static int sonypi_misc_release(struct inode *inode, struct file *file)
27449 {
27450 mutex_lock(&sonypi_device.lock);
27451 - sonypi_device.open_count--;
27452 + local_dec(&sonypi_device.open_count);
27453 mutex_unlock(&sonypi_device.lock);
27454 return 0;
27455 }
27456 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27457 {
27458 mutex_lock(&sonypi_device.lock);
27459 /* Flush input queue on first open */
27460 - if (!sonypi_device.open_count)
27461 + if (!local_read(&sonypi_device.open_count))
27462 kfifo_reset(&sonypi_device.fifo);
27463 - sonypi_device.open_count++;
27464 + local_inc(&sonypi_device.open_count);
27465 mutex_unlock(&sonypi_device.lock);
27466
27467 return 0;
27468 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27469 index 361a1df..2471eee 100644
27470 --- a/drivers/char/tpm/tpm.c
27471 +++ b/drivers/char/tpm/tpm.c
27472 @@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27473 chip->vendor.req_complete_val)
27474 goto out_recv;
27475
27476 - if ((status == chip->vendor.req_canceled)) {
27477 + if (status == chip->vendor.req_canceled) {
27478 dev_err(chip->dev, "Operation Canceled\n");
27479 rc = -ECANCELED;
27480 goto out;
27481 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27482 index 0636520..169c1d0 100644
27483 --- a/drivers/char/tpm/tpm_bios.c
27484 +++ b/drivers/char/tpm/tpm_bios.c
27485 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27486 event = addr;
27487
27488 if ((event->event_type == 0 && event->event_size == 0) ||
27489 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27490 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27491 return NULL;
27492
27493 return addr;
27494 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27495 return NULL;
27496
27497 if ((event->event_type == 0 && event->event_size == 0) ||
27498 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27499 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27500 return NULL;
27501
27502 (*pos)++;
27503 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27504 int i;
27505
27506 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27507 - seq_putc(m, data[i]);
27508 + if (!seq_putc(m, data[i]))
27509 + return -EFAULT;
27510
27511 return 0;
27512 }
27513 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27514 log->bios_event_log_end = log->bios_event_log + len;
27515
27516 virt = acpi_os_map_memory(start, len);
27517 + if (!virt) {
27518 + kfree(log->bios_event_log);
27519 + log->bios_event_log = NULL;
27520 + return -EFAULT;
27521 + }
27522
27523 - memcpy(log->bios_event_log, virt, len);
27524 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27525
27526 acpi_os_unmap_memory(virt, len);
27527 return 0;
27528 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27529 index 8e3c46d..c139b99 100644
27530 --- a/drivers/char/virtio_console.c
27531 +++ b/drivers/char/virtio_console.c
27532 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27533 if (to_user) {
27534 ssize_t ret;
27535
27536 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27537 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27538 if (ret)
27539 return -EFAULT;
27540 } else {
27541 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27542 if (!port_has_data(port) && !port->host_connected)
27543 return 0;
27544
27545 - return fill_readbuf(port, ubuf, count, true);
27546 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27547 }
27548
27549 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27550 diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
27551 index eb1d864..39ee5a7 100644
27552 --- a/drivers/dma/dmatest.c
27553 +++ b/drivers/dma/dmatest.c
27554 @@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
27555 }
27556 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
27557 cnt = dmatest_add_threads(dtc, DMA_PQ);
27558 - thread_count += cnt > 0 ?: 0;
27559 + thread_count += cnt > 0 ? cnt : 0;
27560 }
27561
27562 pr_info("dmatest: Started %u threads using %s\n",
27563 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27564 index c9eee6d..f9d5280 100644
27565 --- a/drivers/edac/amd64_edac.c
27566 +++ b/drivers/edac/amd64_edac.c
27567 @@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27568 * PCI core identifies what devices are on a system during boot, and then
27569 * inquiry this table to see if this driver is for a given device found.
27570 */
27571 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27572 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27573 {
27574 .vendor = PCI_VENDOR_ID_AMD,
27575 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27576 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27577 index e47e73b..348e0bd 100644
27578 --- a/drivers/edac/amd76x_edac.c
27579 +++ b/drivers/edac/amd76x_edac.c
27580 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27581 edac_mc_free(mci);
27582 }
27583
27584 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27585 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27586 {
27587 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27588 AMD762},
27589 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27590 index 1af531a..3a8ff27 100644
27591 --- a/drivers/edac/e752x_edac.c
27592 +++ b/drivers/edac/e752x_edac.c
27593 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27594 edac_mc_free(mci);
27595 }
27596
27597 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27598 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27599 {
27600 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27601 E7520},
27602 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27603 index 6ffb6d2..383d8d7 100644
27604 --- a/drivers/edac/e7xxx_edac.c
27605 +++ b/drivers/edac/e7xxx_edac.c
27606 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27607 edac_mc_free(mci);
27608 }
27609
27610 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27611 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27612 {
27613 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27614 E7205},
27615 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27616 index 495198a..ac08c85 100644
27617 --- a/drivers/edac/edac_pci_sysfs.c
27618 +++ b/drivers/edac/edac_pci_sysfs.c
27619 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27620 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27621 static int edac_pci_poll_msec = 1000; /* one second workq period */
27622
27623 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27624 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27625 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27626 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27627
27628 static struct kobject *edac_pci_top_main_kobj;
27629 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27630 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27631 edac_printk(KERN_CRIT, EDAC_PCI,
27632 "Signaled System Error on %s\n",
27633 pci_name(dev));
27634 - atomic_inc(&pci_nonparity_count);
27635 + atomic_inc_unchecked(&pci_nonparity_count);
27636 }
27637
27638 if (status & (PCI_STATUS_PARITY)) {
27639 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27640 "Master Data Parity Error on %s\n",
27641 pci_name(dev));
27642
27643 - atomic_inc(&pci_parity_count);
27644 + atomic_inc_unchecked(&pci_parity_count);
27645 }
27646
27647 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27648 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27649 "Detected Parity Error on %s\n",
27650 pci_name(dev));
27651
27652 - atomic_inc(&pci_parity_count);
27653 + atomic_inc_unchecked(&pci_parity_count);
27654 }
27655 }
27656
27657 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27658 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27659 "Signaled System Error on %s\n",
27660 pci_name(dev));
27661 - atomic_inc(&pci_nonparity_count);
27662 + atomic_inc_unchecked(&pci_nonparity_count);
27663 }
27664
27665 if (status & (PCI_STATUS_PARITY)) {
27666 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27667 "Master Data Parity Error on "
27668 "%s\n", pci_name(dev));
27669
27670 - atomic_inc(&pci_parity_count);
27671 + atomic_inc_unchecked(&pci_parity_count);
27672 }
27673
27674 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27675 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27676 "Detected Parity Error on %s\n",
27677 pci_name(dev));
27678
27679 - atomic_inc(&pci_parity_count);
27680 + atomic_inc_unchecked(&pci_parity_count);
27681 }
27682 }
27683 }
27684 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27685 if (!check_pci_errors)
27686 return;
27687
27688 - before_count = atomic_read(&pci_parity_count);
27689 + before_count = atomic_read_unchecked(&pci_parity_count);
27690
27691 /* scan all PCI devices looking for a Parity Error on devices and
27692 * bridges.
27693 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27694 /* Only if operator has selected panic on PCI Error */
27695 if (edac_pci_get_panic_on_pe()) {
27696 /* If the count is different 'after' from 'before' */
27697 - if (before_count != atomic_read(&pci_parity_count))
27698 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27699 panic("EDAC: PCI Parity Error");
27700 }
27701 }
27702 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27703 index c0510b3..6e2a954 100644
27704 --- a/drivers/edac/i3000_edac.c
27705 +++ b/drivers/edac/i3000_edac.c
27706 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27707 edac_mc_free(mci);
27708 }
27709
27710 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27711 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27712 {
27713 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27714 I3000},
27715 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27716 index aa08497..7e6822a 100644
27717 --- a/drivers/edac/i3200_edac.c
27718 +++ b/drivers/edac/i3200_edac.c
27719 @@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27720 edac_mc_free(mci);
27721 }
27722
27723 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27724 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27725 {
27726 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27727 I3200},
27728 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27729 index 4dc3ac2..67d05a6 100644
27730 --- a/drivers/edac/i5000_edac.c
27731 +++ b/drivers/edac/i5000_edac.c
27732 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27733 *
27734 * The "E500P" device is the first device supported.
27735 */
27736 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27737 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27738 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27739 .driver_data = I5000P},
27740
27741 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27742 index bcbdeec..9886d16 100644
27743 --- a/drivers/edac/i5100_edac.c
27744 +++ b/drivers/edac/i5100_edac.c
27745 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27746 edac_mc_free(mci);
27747 }
27748
27749 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27750 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27751 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27752 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27753 { 0, }
27754 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27755 index 74d6ec34..baff517 100644
27756 --- a/drivers/edac/i5400_edac.c
27757 +++ b/drivers/edac/i5400_edac.c
27758 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27759 *
27760 * The "E500P" device is the first device supported.
27761 */
27762 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27763 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27764 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27765 {0,} /* 0 terminated list. */
27766 };
27767 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27768 index 6104dba..e7ea8e1 100644
27769 --- a/drivers/edac/i7300_edac.c
27770 +++ b/drivers/edac/i7300_edac.c
27771 @@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27772 *
27773 * Has only 8086:360c PCI ID
27774 */
27775 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27776 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27777 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27778 {0,} /* 0 terminated list. */
27779 };
27780 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27781 index 70ad892..178943c 100644
27782 --- a/drivers/edac/i7core_edac.c
27783 +++ b/drivers/edac/i7core_edac.c
27784 @@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
27785 /*
27786 * pci_device_id table for which devices we are looking for
27787 */
27788 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27789 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27790 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27791 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27792 {0,} /* 0 terminated list. */
27793 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27794 index 4329d39..f3022ef 100644
27795 --- a/drivers/edac/i82443bxgx_edac.c
27796 +++ b/drivers/edac/i82443bxgx_edac.c
27797 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27798
27799 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27800
27801 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27802 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27803 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27804 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27805 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27806 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27807 index 931a057..fd28340 100644
27808 --- a/drivers/edac/i82860_edac.c
27809 +++ b/drivers/edac/i82860_edac.c
27810 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27811 edac_mc_free(mci);
27812 }
27813
27814 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27815 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27816 {
27817 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27818 I82860},
27819 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27820 index 33864c6..01edc61 100644
27821 --- a/drivers/edac/i82875p_edac.c
27822 +++ b/drivers/edac/i82875p_edac.c
27823 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27824 edac_mc_free(mci);
27825 }
27826
27827 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27828 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27829 {
27830 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27831 I82875P},
27832 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27833 index a5da732..983363b 100644
27834 --- a/drivers/edac/i82975x_edac.c
27835 +++ b/drivers/edac/i82975x_edac.c
27836 @@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27837 edac_mc_free(mci);
27838 }
27839
27840 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27841 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27842 {
27843 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27844 I82975X
27845 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27846 index 0106747..0b40417 100644
27847 --- a/drivers/edac/mce_amd.h
27848 +++ b/drivers/edac/mce_amd.h
27849 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
27850 bool (*dc_mce)(u16, u8);
27851 bool (*ic_mce)(u16, u8);
27852 bool (*nb_mce)(u16, u8);
27853 -};
27854 +} __no_const;
27855
27856 void amd_report_gart_errors(bool);
27857 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
27858 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27859 index b153674..ad2ba9b 100644
27860 --- a/drivers/edac/r82600_edac.c
27861 +++ b/drivers/edac/r82600_edac.c
27862 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27863 edac_mc_free(mci);
27864 }
27865
27866 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27867 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27868 {
27869 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27870 },
27871 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
27872 index 7a402bf..af0b211 100644
27873 --- a/drivers/edac/sb_edac.c
27874 +++ b/drivers/edac/sb_edac.c
27875 @@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
27876 /*
27877 * pci_device_id table for which devices we are looking for
27878 */
27879 -static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
27880 +static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
27881 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
27882 {0,} /* 0 terminated list. */
27883 };
27884 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27885 index b6f47de..c5acf3a 100644
27886 --- a/drivers/edac/x38_edac.c
27887 +++ b/drivers/edac/x38_edac.c
27888 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27889 edac_mc_free(mci);
27890 }
27891
27892 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27893 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27894 {
27895 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27896 X38},
27897 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27898 index 85661b0..c784559a 100644
27899 --- a/drivers/firewire/core-card.c
27900 +++ b/drivers/firewire/core-card.c
27901 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27902
27903 void fw_core_remove_card(struct fw_card *card)
27904 {
27905 - struct fw_card_driver dummy_driver = dummy_driver_template;
27906 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
27907
27908 card->driver->update_phy_reg(card, 4,
27909 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27910 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27911 index 4799393..37bd3ab 100644
27912 --- a/drivers/firewire/core-cdev.c
27913 +++ b/drivers/firewire/core-cdev.c
27914 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27915 int ret;
27916
27917 if ((request->channels == 0 && request->bandwidth == 0) ||
27918 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27919 - request->bandwidth < 0)
27920 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27921 return -EINVAL;
27922
27923 r = kmalloc(sizeof(*r), GFP_KERNEL);
27924 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27925 index 855ab3f..11f4bbd 100644
27926 --- a/drivers/firewire/core-transaction.c
27927 +++ b/drivers/firewire/core-transaction.c
27928 @@ -37,6 +37,7 @@
27929 #include <linux/timer.h>
27930 #include <linux/types.h>
27931 #include <linux/workqueue.h>
27932 +#include <linux/sched.h>
27933
27934 #include <asm/byteorder.h>
27935
27936 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27937 index b45be57..5fad18b 100644
27938 --- a/drivers/firewire/core.h
27939 +++ b/drivers/firewire/core.h
27940 @@ -101,6 +101,7 @@ struct fw_card_driver {
27941
27942 int (*stop_iso)(struct fw_iso_context *ctx);
27943 };
27944 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27945
27946 void fw_card_initialize(struct fw_card *card,
27947 const struct fw_card_driver *driver, struct device *device);
27948 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27949 index 153980b..4b4d046 100644
27950 --- a/drivers/firmware/dmi_scan.c
27951 +++ b/drivers/firmware/dmi_scan.c
27952 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27953 }
27954 }
27955 else {
27956 - /*
27957 - * no iounmap() for that ioremap(); it would be a no-op, but
27958 - * it's so early in setup that sucker gets confused into doing
27959 - * what it shouldn't if we actually call it.
27960 - */
27961 p = dmi_ioremap(0xF0000, 0x10000);
27962 if (p == NULL)
27963 goto error;
27964 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27965 if (buf == NULL)
27966 return -1;
27967
27968 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27969 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27970
27971 iounmap(buf);
27972 return 0;
27973 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27974 index 98723cb..10ca85b 100644
27975 --- a/drivers/gpio/gpio-vr41xx.c
27976 +++ b/drivers/gpio/gpio-vr41xx.c
27977 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27978 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27979 maskl, pendl, maskh, pendh);
27980
27981 - atomic_inc(&irq_err_count);
27982 + atomic_inc_unchecked(&irq_err_count);
27983
27984 return -EINVAL;
27985 }
27986 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27987 index 8323fc3..5c1d755 100644
27988 --- a/drivers/gpu/drm/drm_crtc.c
27989 +++ b/drivers/gpu/drm/drm_crtc.c
27990 @@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27991 */
27992 if ((out_resp->count_modes >= mode_count) && mode_count) {
27993 copied = 0;
27994 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27995 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27996 list_for_each_entry(mode, &connector->modes, head) {
27997 drm_crtc_convert_to_umode(&u_mode, mode);
27998 if (copy_to_user(mode_ptr + copied,
27999 @@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
28000
28001 if ((out_resp->count_props >= props_count) && props_count) {
28002 copied = 0;
28003 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
28004 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
28005 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
28006 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
28007 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
28008 if (connector->property_ids[i] != 0) {
28009 if (put_user(connector->property_ids[i],
28010 @@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
28011
28012 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
28013 copied = 0;
28014 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
28015 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
28016 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
28017 if (connector->encoder_ids[i] != 0) {
28018 if (put_user(connector->encoder_ids[i],
28019 @@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
28020 }
28021
28022 for (i = 0; i < crtc_req->count_connectors; i++) {
28023 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
28024 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
28025 if (get_user(out_id, &set_connectors_ptr[i])) {
28026 ret = -EFAULT;
28027 goto out;
28028 @@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
28029 fb = obj_to_fb(obj);
28030
28031 num_clips = r->num_clips;
28032 - clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
28033 + clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
28034
28035 if (!num_clips != !clips_ptr) {
28036 ret = -EINVAL;
28037 @@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28038 out_resp->flags = property->flags;
28039
28040 if ((out_resp->count_values >= value_count) && value_count) {
28041 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
28042 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
28043 for (i = 0; i < value_count; i++) {
28044 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
28045 ret = -EFAULT;
28046 @@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28047 if (property->flags & DRM_MODE_PROP_ENUM) {
28048 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
28049 copied = 0;
28050 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
28051 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
28052 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
28053
28054 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
28055 @@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28056 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
28057 copied = 0;
28058 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
28059 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
28060 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
28061
28062 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
28063 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
28064 @@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
28065 struct drm_mode_get_blob *out_resp = data;
28066 struct drm_property_blob *blob;
28067 int ret = 0;
28068 - void *blob_ptr;
28069 + void __user *blob_ptr;
28070
28071 if (!drm_core_check_feature(dev, DRIVER_MODESET))
28072 return -EINVAL;
28073 @@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
28074 blob = obj_to_blob(obj);
28075
28076 if (out_resp->length == blob->length) {
28077 - blob_ptr = (void *)(unsigned long)out_resp->data;
28078 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
28079 if (copy_to_user(blob_ptr, blob->data, blob->length)){
28080 ret = -EFAULT;
28081 goto done;
28082 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
28083 index d2619d7..bd6bd00 100644
28084 --- a/drivers/gpu/drm/drm_crtc_helper.c
28085 +++ b/drivers/gpu/drm/drm_crtc_helper.c
28086 @@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
28087 struct drm_crtc *tmp;
28088 int crtc_mask = 1;
28089
28090 - WARN(!crtc, "checking null crtc?\n");
28091 + BUG_ON(!crtc);
28092
28093 dev = crtc->dev;
28094
28095 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
28096 index 40c187c..5746164 100644
28097 --- a/drivers/gpu/drm/drm_drv.c
28098 +++ b/drivers/gpu/drm/drm_drv.c
28099 @@ -308,7 +308,7 @@ module_exit(drm_core_exit);
28100 /**
28101 * Copy and IOCTL return string to user space
28102 */
28103 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
28104 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
28105 {
28106 int len;
28107
28108 @@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
28109
28110 dev = file_priv->minor->dev;
28111 atomic_inc(&dev->ioctl_count);
28112 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
28113 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
28114 ++file_priv->ioctl_count;
28115
28116 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
28117 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
28118 index 828bf65..cdaa0e9 100644
28119 --- a/drivers/gpu/drm/drm_fops.c
28120 +++ b/drivers/gpu/drm/drm_fops.c
28121 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
28122 }
28123
28124 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
28125 - atomic_set(&dev->counts[i], 0);
28126 + atomic_set_unchecked(&dev->counts[i], 0);
28127
28128 dev->sigdata.lock = NULL;
28129
28130 @@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
28131
28132 retcode = drm_open_helper(inode, filp, dev);
28133 if (!retcode) {
28134 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
28135 - if (!dev->open_count++)
28136 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
28137 + if (local_inc_return(&dev->open_count) == 1)
28138 retcode = drm_setup(dev);
28139 }
28140 if (!retcode) {
28141 @@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
28142
28143 mutex_lock(&drm_global_mutex);
28144
28145 - DRM_DEBUG("open_count = %d\n", dev->open_count);
28146 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
28147
28148 if (dev->driver->preclose)
28149 dev->driver->preclose(dev, file_priv);
28150 @@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
28151 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
28152 task_pid_nr(current),
28153 (long)old_encode_dev(file_priv->minor->device),
28154 - dev->open_count);
28155 + local_read(&dev->open_count));
28156
28157 /* Release any auth tokens that might point to this file_priv,
28158 (do that under the drm_global_mutex) */
28159 @@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
28160 * End inline drm_release
28161 */
28162
28163 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
28164 - if (!--dev->open_count) {
28165 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
28166 + if (local_dec_and_test(&dev->open_count)) {
28167 if (atomic_read(&dev->ioctl_count)) {
28168 DRM_ERROR("Device busy: %d\n",
28169 atomic_read(&dev->ioctl_count));
28170 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
28171 index c87dc96..326055d 100644
28172 --- a/drivers/gpu/drm/drm_global.c
28173 +++ b/drivers/gpu/drm/drm_global.c
28174 @@ -36,7 +36,7 @@
28175 struct drm_global_item {
28176 struct mutex mutex;
28177 void *object;
28178 - int refcount;
28179 + atomic_t refcount;
28180 };
28181
28182 static struct drm_global_item glob[DRM_GLOBAL_NUM];
28183 @@ -49,7 +49,7 @@ void drm_global_init(void)
28184 struct drm_global_item *item = &glob[i];
28185 mutex_init(&item->mutex);
28186 item->object = NULL;
28187 - item->refcount = 0;
28188 + atomic_set(&item->refcount, 0);
28189 }
28190 }
28191
28192 @@ -59,7 +59,7 @@ void drm_global_release(void)
28193 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
28194 struct drm_global_item *item = &glob[i];
28195 BUG_ON(item->object != NULL);
28196 - BUG_ON(item->refcount != 0);
28197 + BUG_ON(atomic_read(&item->refcount) != 0);
28198 }
28199 }
28200
28201 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28202 void *object;
28203
28204 mutex_lock(&item->mutex);
28205 - if (item->refcount == 0) {
28206 + if (atomic_read(&item->refcount) == 0) {
28207 item->object = kzalloc(ref->size, GFP_KERNEL);
28208 if (unlikely(item->object == NULL)) {
28209 ret = -ENOMEM;
28210 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28211 goto out_err;
28212
28213 }
28214 - ++item->refcount;
28215 + atomic_inc(&item->refcount);
28216 ref->object = item->object;
28217 object = item->object;
28218 mutex_unlock(&item->mutex);
28219 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
28220 struct drm_global_item *item = &glob[ref->global_type];
28221
28222 mutex_lock(&item->mutex);
28223 - BUG_ON(item->refcount == 0);
28224 + BUG_ON(atomic_read(&item->refcount) == 0);
28225 BUG_ON(ref->object != item->object);
28226 - if (--item->refcount == 0) {
28227 + if (atomic_dec_and_test(&item->refcount)) {
28228 ref->release(ref);
28229 item->object = NULL;
28230 }
28231 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
28232 index ab1162d..42587b2 100644
28233 --- a/drivers/gpu/drm/drm_info.c
28234 +++ b/drivers/gpu/drm/drm_info.c
28235 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
28236 struct drm_local_map *map;
28237 struct drm_map_list *r_list;
28238
28239 - /* Hardcoded from _DRM_FRAME_BUFFER,
28240 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28241 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28242 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28243 + static const char * const types[] = {
28244 + [_DRM_FRAME_BUFFER] = "FB",
28245 + [_DRM_REGISTERS] = "REG",
28246 + [_DRM_SHM] = "SHM",
28247 + [_DRM_AGP] = "AGP",
28248 + [_DRM_SCATTER_GATHER] = "SG",
28249 + [_DRM_CONSISTENT] = "PCI",
28250 + [_DRM_GEM] = "GEM" };
28251 const char *type;
28252 int i;
28253
28254 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
28255 map = r_list->map;
28256 if (!map)
28257 continue;
28258 - if (map->type < 0 || map->type > 5)
28259 + if (map->type >= ARRAY_SIZE(types))
28260 type = "??";
28261 else
28262 type = types[map->type];
28263 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
28264 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28265 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28266 vma->vm_flags & VM_IO ? 'i' : '-',
28267 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28268 + 0);
28269 +#else
28270 vma->vm_pgoff);
28271 +#endif
28272
28273 #if defined(__i386__)
28274 pgprot = pgprot_val(vma->vm_page_prot);
28275 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28276 index ddd70db..40321e6 100644
28277 --- a/drivers/gpu/drm/drm_ioc32.c
28278 +++ b/drivers/gpu/drm/drm_ioc32.c
28279 @@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
28280 request = compat_alloc_user_space(nbytes);
28281 if (!access_ok(VERIFY_WRITE, request, nbytes))
28282 return -EFAULT;
28283 - list = (struct drm_buf_desc *) (request + 1);
28284 + list = (struct drm_buf_desc __user *) (request + 1);
28285
28286 if (__put_user(count, &request->count)
28287 || __put_user(list, &request->list))
28288 @@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
28289 request = compat_alloc_user_space(nbytes);
28290 if (!access_ok(VERIFY_WRITE, request, nbytes))
28291 return -EFAULT;
28292 - list = (struct drm_buf_pub *) (request + 1);
28293 + list = (struct drm_buf_pub __user *) (request + 1);
28294
28295 if (__put_user(count, &request->count)
28296 || __put_user(list, &request->list))
28297 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28298 index 904d7e9..ab88581 100644
28299 --- a/drivers/gpu/drm/drm_ioctl.c
28300 +++ b/drivers/gpu/drm/drm_ioctl.c
28301 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28302 stats->data[i].value =
28303 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28304 else
28305 - stats->data[i].value = atomic_read(&dev->counts[i]);
28306 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28307 stats->data[i].type = dev->types[i];
28308 }
28309
28310 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28311 index 632ae24..244cf4a 100644
28312 --- a/drivers/gpu/drm/drm_lock.c
28313 +++ b/drivers/gpu/drm/drm_lock.c
28314 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28315 if (drm_lock_take(&master->lock, lock->context)) {
28316 master->lock.file_priv = file_priv;
28317 master->lock.lock_time = jiffies;
28318 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28319 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28320 break; /* Got lock */
28321 }
28322
28323 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28324 return -EINVAL;
28325 }
28326
28327 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28328 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28329
28330 if (drm_lock_free(&master->lock, lock->context)) {
28331 /* FIXME: Should really bail out here. */
28332 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28333 index 8f371e8..9f85d52 100644
28334 --- a/drivers/gpu/drm/i810/i810_dma.c
28335 +++ b/drivers/gpu/drm/i810/i810_dma.c
28336 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28337 dma->buflist[vertex->idx],
28338 vertex->discard, vertex->used);
28339
28340 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28341 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28342 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28343 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28344 sarea_priv->last_enqueue = dev_priv->counter - 1;
28345 sarea_priv->last_dispatch = (int)hw_status[5];
28346
28347 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28348 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28349 mc->last_render);
28350
28351 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28352 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28353 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28354 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28355 sarea_priv->last_enqueue = dev_priv->counter - 1;
28356 sarea_priv->last_dispatch = (int)hw_status[5];
28357
28358 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28359 index c9339f4..f5e1b9d 100644
28360 --- a/drivers/gpu/drm/i810/i810_drv.h
28361 +++ b/drivers/gpu/drm/i810/i810_drv.h
28362 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28363 int page_flipping;
28364
28365 wait_queue_head_t irq_queue;
28366 - atomic_t irq_received;
28367 - atomic_t irq_emitted;
28368 + atomic_unchecked_t irq_received;
28369 + atomic_unchecked_t irq_emitted;
28370
28371 int front_offset;
28372 } drm_i810_private_t;
28373 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28374 index b2e3c97..58cf079 100644
28375 --- a/drivers/gpu/drm/i915/i915_debugfs.c
28376 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
28377 @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28378 I915_READ(GTIMR));
28379 }
28380 seq_printf(m, "Interrupts received: %d\n",
28381 - atomic_read(&dev_priv->irq_received));
28382 + atomic_read_unchecked(&dev_priv->irq_received));
28383 for (i = 0; i < I915_NUM_RINGS; i++) {
28384 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28385 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28386 @@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28387 return ret;
28388
28389 if (opregion->header)
28390 - seq_write(m, opregion->header, OPREGION_SIZE);
28391 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28392
28393 mutex_unlock(&dev->struct_mutex);
28394
28395 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28396 index c4da951..3c59c5c 100644
28397 --- a/drivers/gpu/drm/i915/i915_dma.c
28398 +++ b/drivers/gpu/drm/i915/i915_dma.c
28399 @@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28400 bool can_switch;
28401
28402 spin_lock(&dev->count_lock);
28403 - can_switch = (dev->open_count == 0);
28404 + can_switch = (local_read(&dev->open_count) == 0);
28405 spin_unlock(&dev->count_lock);
28406 return can_switch;
28407 }
28408 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28409 index ae294a0..1755461 100644
28410 --- a/drivers/gpu/drm/i915/i915_drv.h
28411 +++ b/drivers/gpu/drm/i915/i915_drv.h
28412 @@ -229,7 +229,7 @@ struct drm_i915_display_funcs {
28413 /* render clock increase/decrease */
28414 /* display clock increase/decrease */
28415 /* pll clock increase/decrease */
28416 -};
28417 +} __no_const;
28418
28419 struct intel_device_info {
28420 u8 gen;
28421 @@ -318,7 +318,7 @@ typedef struct drm_i915_private {
28422 int current_page;
28423 int page_flipping;
28424
28425 - atomic_t irq_received;
28426 + atomic_unchecked_t irq_received;
28427
28428 /* protects the irq masks */
28429 spinlock_t irq_lock;
28430 @@ -893,7 +893,7 @@ struct drm_i915_gem_object {
28431 * will be page flipped away on the next vblank. When it
28432 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28433 */
28434 - atomic_t pending_flip;
28435 + atomic_unchecked_t pending_flip;
28436 };
28437
28438 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28439 @@ -1273,7 +1273,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28440 extern void intel_teardown_gmbus(struct drm_device *dev);
28441 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28442 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28443 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28444 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28445 {
28446 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28447 }
28448 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28449 index b9da890..cad1d98 100644
28450 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28451 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28452 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28453 i915_gem_clflush_object(obj);
28454
28455 if (obj->base.pending_write_domain)
28456 - cd->flips |= atomic_read(&obj->pending_flip);
28457 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28458
28459 /* The actual obj->write_domain will be updated with
28460 * pending_write_domain after we emit the accumulated flush for all
28461 @@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28462
28463 static int
28464 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28465 - int count)
28466 + unsigned int count)
28467 {
28468 - int i;
28469 + unsigned int i;
28470
28471 for (i = 0; i < count; i++) {
28472 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28473 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28474 index d47a53b..61154c2 100644
28475 --- a/drivers/gpu/drm/i915/i915_irq.c
28476 +++ b/drivers/gpu/drm/i915/i915_irq.c
28477 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28478 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28479 struct drm_i915_master_private *master_priv;
28480
28481 - atomic_inc(&dev_priv->irq_received);
28482 + atomic_inc_unchecked(&dev_priv->irq_received);
28483
28484 /* disable master interrupt before clearing iir */
28485 de_ier = I915_READ(DEIER);
28486 @@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28487 struct drm_i915_master_private *master_priv;
28488 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28489
28490 - atomic_inc(&dev_priv->irq_received);
28491 + atomic_inc_unchecked(&dev_priv->irq_received);
28492
28493 if (IS_GEN6(dev))
28494 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28495 @@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28496 int ret = IRQ_NONE, pipe;
28497 bool blc_event = false;
28498
28499 - atomic_inc(&dev_priv->irq_received);
28500 + atomic_inc_unchecked(&dev_priv->irq_received);
28501
28502 iir = I915_READ(IIR);
28503
28504 @@ -1750,7 +1750,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28505 {
28506 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28507
28508 - atomic_set(&dev_priv->irq_received, 0);
28509 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28510
28511 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28512 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28513 @@ -1938,7 +1938,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28514 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28515 int pipe;
28516
28517 - atomic_set(&dev_priv->irq_received, 0);
28518 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28519
28520 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28521 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28522 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28523 index daa5743..c0757a9 100644
28524 --- a/drivers/gpu/drm/i915/intel_display.c
28525 +++ b/drivers/gpu/drm/i915/intel_display.c
28526 @@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28527
28528 wait_event(dev_priv->pending_flip_queue,
28529 atomic_read(&dev_priv->mm.wedged) ||
28530 - atomic_read(&obj->pending_flip) == 0);
28531 + atomic_read_unchecked(&obj->pending_flip) == 0);
28532
28533 /* Big Hammer, we also need to ensure that any pending
28534 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28535 @@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28536 obj = to_intel_framebuffer(crtc->fb)->obj;
28537 dev_priv = crtc->dev->dev_private;
28538 wait_event(dev_priv->pending_flip_queue,
28539 - atomic_read(&obj->pending_flip) == 0);
28540 + atomic_read_unchecked(&obj->pending_flip) == 0);
28541 }
28542
28543 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28544 @@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28545
28546 atomic_clear_mask(1 << intel_crtc->plane,
28547 &obj->pending_flip.counter);
28548 - if (atomic_read(&obj->pending_flip) == 0)
28549 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
28550 wake_up(&dev_priv->pending_flip_queue);
28551
28552 schedule_work(&work->work);
28553 @@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28554 /* Block clients from rendering to the new back buffer until
28555 * the flip occurs and the object is no longer visible.
28556 */
28557 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28558 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28559
28560 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28561 if (ret)
28562 @@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28563 return 0;
28564
28565 cleanup_pending:
28566 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28567 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28568 drm_gem_object_unreference(&work->old_fb_obj->base);
28569 drm_gem_object_unreference(&obj->base);
28570 mutex_unlock(&dev->struct_mutex);
28571 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28572 index 54558a0..2d97005 100644
28573 --- a/drivers/gpu/drm/mga/mga_drv.h
28574 +++ b/drivers/gpu/drm/mga/mga_drv.h
28575 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28576 u32 clear_cmd;
28577 u32 maccess;
28578
28579 - atomic_t vbl_received; /**< Number of vblanks received. */
28580 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28581 wait_queue_head_t fence_queue;
28582 - atomic_t last_fence_retired;
28583 + atomic_unchecked_t last_fence_retired;
28584 u32 next_fence_to_post;
28585
28586 unsigned int fb_cpp;
28587 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28588 index 2581202..f230a8d9 100644
28589 --- a/drivers/gpu/drm/mga/mga_irq.c
28590 +++ b/drivers/gpu/drm/mga/mga_irq.c
28591 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28592 if (crtc != 0)
28593 return 0;
28594
28595 - return atomic_read(&dev_priv->vbl_received);
28596 + return atomic_read_unchecked(&dev_priv->vbl_received);
28597 }
28598
28599
28600 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28601 /* VBLANK interrupt */
28602 if (status & MGA_VLINEPEN) {
28603 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28604 - atomic_inc(&dev_priv->vbl_received);
28605 + atomic_inc_unchecked(&dev_priv->vbl_received);
28606 drm_handle_vblank(dev, 0);
28607 handled = 1;
28608 }
28609 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28610 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28611 MGA_WRITE(MGA_PRIMEND, prim_end);
28612
28613 - atomic_inc(&dev_priv->last_fence_retired);
28614 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28615 DRM_WAKEUP(&dev_priv->fence_queue);
28616 handled = 1;
28617 }
28618 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28619 * using fences.
28620 */
28621 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28622 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28623 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28624 - *sequence) <= (1 << 23)));
28625
28626 *sequence = cur_fence;
28627 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28628 index 5fc201b..7b032b9 100644
28629 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28630 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28631 @@ -201,7 +201,7 @@ struct methods {
28632 const char desc[8];
28633 void (*loadbios)(struct drm_device *, uint8_t *);
28634 const bool rw;
28635 -};
28636 +} __do_const;
28637
28638 static struct methods shadow_methods[] = {
28639 { "PRAMIN", load_vbios_pramin, true },
28640 @@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28641 struct bit_table {
28642 const char id;
28643 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28644 -};
28645 +} __no_const;
28646
28647 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28648
28649 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28650 index 4c0be3a..5757582 100644
28651 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28652 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28653 @@ -238,7 +238,7 @@ struct nouveau_channel {
28654 struct list_head pending;
28655 uint32_t sequence;
28656 uint32_t sequence_ack;
28657 - atomic_t last_sequence_irq;
28658 + atomic_unchecked_t last_sequence_irq;
28659 struct nouveau_vma vma;
28660 } fence;
28661
28662 @@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28663 u32 handle, u16 class);
28664 void (*set_tile_region)(struct drm_device *dev, int i);
28665 void (*tlb_flush)(struct drm_device *, int engine);
28666 -};
28667 +} __no_const;
28668
28669 struct nouveau_instmem_engine {
28670 void *priv;
28671 @@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28672 struct nouveau_mc_engine {
28673 int (*init)(struct drm_device *dev);
28674 void (*takedown)(struct drm_device *dev);
28675 -};
28676 +} __no_const;
28677
28678 struct nouveau_timer_engine {
28679 int (*init)(struct drm_device *dev);
28680 void (*takedown)(struct drm_device *dev);
28681 uint64_t (*read)(struct drm_device *dev);
28682 -};
28683 +} __no_const;
28684
28685 struct nouveau_fb_engine {
28686 int num_tiles;
28687 @@ -558,7 +558,7 @@ struct nouveau_vram_engine {
28688 void (*put)(struct drm_device *, struct nouveau_mem **);
28689
28690 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28691 -};
28692 +} __no_const;
28693
28694 struct nouveau_engine {
28695 struct nouveau_instmem_engine instmem;
28696 @@ -706,7 +706,7 @@ struct drm_nouveau_private {
28697 struct drm_global_reference mem_global_ref;
28698 struct ttm_bo_global_ref bo_global_ref;
28699 struct ttm_bo_device bdev;
28700 - atomic_t validate_sequence;
28701 + atomic_unchecked_t validate_sequence;
28702 } ttm;
28703
28704 struct {
28705 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28706 index 2f6daae..c9d7b9e 100644
28707 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28708 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28709 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28710 if (USE_REFCNT(dev))
28711 sequence = nvchan_rd32(chan, 0x48);
28712 else
28713 - sequence = atomic_read(&chan->fence.last_sequence_irq);
28714 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28715
28716 if (chan->fence.sequence_ack == sequence)
28717 goto out;
28718 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28719 return ret;
28720 }
28721
28722 - atomic_set(&chan->fence.last_sequence_irq, 0);
28723 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28724 return 0;
28725 }
28726
28727 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28728 index 7ce3fde..cb3ea04 100644
28729 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28730 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28731 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28732 int trycnt = 0;
28733 int ret, i;
28734
28735 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28736 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28737 retry:
28738 if (++trycnt > 100000) {
28739 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28740 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28741 index d8831ab..0ba8356 100644
28742 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
28743 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28744 @@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28745 bool can_switch;
28746
28747 spin_lock(&dev->count_lock);
28748 - can_switch = (dev->open_count == 0);
28749 + can_switch = (local_read(&dev->open_count) == 0);
28750 spin_unlock(&dev->count_lock);
28751 return can_switch;
28752 }
28753 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28754 index dbdea8e..cd6eeeb 100644
28755 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
28756 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28757 @@ -554,7 +554,7 @@ static int
28758 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28759 u32 class, u32 mthd, u32 data)
28760 {
28761 - atomic_set(&chan->fence.last_sequence_irq, data);
28762 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28763 return 0;
28764 }
28765
28766 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28767 index bcac90b..53bfc76 100644
28768 --- a/drivers/gpu/drm/r128/r128_cce.c
28769 +++ b/drivers/gpu/drm/r128/r128_cce.c
28770 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28771
28772 /* GH: Simple idle check.
28773 */
28774 - atomic_set(&dev_priv->idle_count, 0);
28775 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28776
28777 /* We don't support anything other than bus-mastering ring mode,
28778 * but the ring can be in either AGP or PCI space for the ring
28779 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28780 index 930c71b..499aded 100644
28781 --- a/drivers/gpu/drm/r128/r128_drv.h
28782 +++ b/drivers/gpu/drm/r128/r128_drv.h
28783 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28784 int is_pci;
28785 unsigned long cce_buffers_offset;
28786
28787 - atomic_t idle_count;
28788 + atomic_unchecked_t idle_count;
28789
28790 int page_flipping;
28791 int current_page;
28792 u32 crtc_offset;
28793 u32 crtc_offset_cntl;
28794
28795 - atomic_t vbl_received;
28796 + atomic_unchecked_t vbl_received;
28797
28798 u32 color_fmt;
28799 unsigned int front_offset;
28800 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28801 index 429d5a0..7e899ed 100644
28802 --- a/drivers/gpu/drm/r128/r128_irq.c
28803 +++ b/drivers/gpu/drm/r128/r128_irq.c
28804 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28805 if (crtc != 0)
28806 return 0;
28807
28808 - return atomic_read(&dev_priv->vbl_received);
28809 + return atomic_read_unchecked(&dev_priv->vbl_received);
28810 }
28811
28812 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28813 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28814 /* VBLANK interrupt */
28815 if (status & R128_CRTC_VBLANK_INT) {
28816 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28817 - atomic_inc(&dev_priv->vbl_received);
28818 + atomic_inc_unchecked(&dev_priv->vbl_received);
28819 drm_handle_vblank(dev, 0);
28820 return IRQ_HANDLED;
28821 }
28822 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28823 index a9e33ce..09edd4b 100644
28824 --- a/drivers/gpu/drm/r128/r128_state.c
28825 +++ b/drivers/gpu/drm/r128/r128_state.c
28826 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28827
28828 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28829 {
28830 - if (atomic_read(&dev_priv->idle_count) == 0)
28831 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28832 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28833 else
28834 - atomic_set(&dev_priv->idle_count, 0);
28835 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28836 }
28837
28838 #endif
28839 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28840 index 5a82b6b..9e69c73 100644
28841 --- a/drivers/gpu/drm/radeon/mkregtable.c
28842 +++ b/drivers/gpu/drm/radeon/mkregtable.c
28843 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28844 regex_t mask_rex;
28845 regmatch_t match[4];
28846 char buf[1024];
28847 - size_t end;
28848 + long end;
28849 int len;
28850 int done = 0;
28851 int r;
28852 unsigned o;
28853 struct offset *offset;
28854 char last_reg_s[10];
28855 - int last_reg;
28856 + unsigned long last_reg;
28857
28858 if (regcomp
28859 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28860 diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
28861 index cb1acff..8861bc5 100644
28862 --- a/drivers/gpu/drm/radeon/r600_cs.c
28863 +++ b/drivers/gpu/drm/radeon/r600_cs.c
28864 @@ -1304,6 +1304,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
28865 h0 = G_038004_TEX_HEIGHT(word1) + 1;
28866 d0 = G_038004_TEX_DEPTH(word1);
28867 nfaces = 1;
28868 + array = 0;
28869 switch (G_038000_DIM(word0)) {
28870 case V_038000_SQ_TEX_DIM_1D:
28871 case V_038000_SQ_TEX_DIM_2D:
28872 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28873 index 8227e76..ce0b195 100644
28874 --- a/drivers/gpu/drm/radeon/radeon.h
28875 +++ b/drivers/gpu/drm/radeon/radeon.h
28876 @@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28877 */
28878 struct radeon_fence_driver {
28879 uint32_t scratch_reg;
28880 - atomic_t seq;
28881 + atomic_unchecked_t seq;
28882 uint32_t last_seq;
28883 unsigned long last_jiffies;
28884 unsigned long last_timeout;
28885 @@ -530,7 +530,7 @@ struct r600_blit_cp_primitives {
28886 int x2, int y2);
28887 void (*draw_auto)(struct radeon_device *rdev);
28888 void (*set_default_state)(struct radeon_device *rdev);
28889 -};
28890 +} __no_const;
28891
28892 struct r600_blit {
28893 struct mutex mutex;
28894 @@ -954,7 +954,7 @@ struct radeon_asic {
28895 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28896 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28897 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28898 -};
28899 +} __no_const;
28900
28901 /*
28902 * Asic structures
28903 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28904 index 9231564..78b00fd 100644
28905 --- a/drivers/gpu/drm/radeon/radeon_device.c
28906 +++ b/drivers/gpu/drm/radeon/radeon_device.c
28907 @@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28908 bool can_switch;
28909
28910 spin_lock(&dev->count_lock);
28911 - can_switch = (dev->open_count == 0);
28912 + can_switch = (local_read(&dev->open_count) == 0);
28913 spin_unlock(&dev->count_lock);
28914 return can_switch;
28915 }
28916 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28917 index a1b59ca..86f2d44 100644
28918 --- a/drivers/gpu/drm/radeon/radeon_drv.h
28919 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
28920 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28921
28922 /* SW interrupt */
28923 wait_queue_head_t swi_queue;
28924 - atomic_t swi_emitted;
28925 + atomic_unchecked_t swi_emitted;
28926 int vblank_crtc;
28927 uint32_t irq_enable_reg;
28928 uint32_t r500_disp_irq_reg;
28929 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28930 index 76ec0e9..6feb1a3 100644
28931 --- a/drivers/gpu/drm/radeon/radeon_fence.c
28932 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
28933 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28934 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28935 return 0;
28936 }
28937 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28938 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28939 if (!rdev->cp.ready)
28940 /* FIXME: cp is not running assume everythings is done right
28941 * away
28942 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28943 return r;
28944 }
28945 radeon_fence_write(rdev, 0);
28946 - atomic_set(&rdev->fence_drv.seq, 0);
28947 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28948 INIT_LIST_HEAD(&rdev->fence_drv.created);
28949 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28950 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28951 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28952 index 48b7cea..342236f 100644
28953 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28954 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28955 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28956 request = compat_alloc_user_space(sizeof(*request));
28957 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28958 || __put_user(req32.param, &request->param)
28959 - || __put_user((void __user *)(unsigned long)req32.value,
28960 + || __put_user((unsigned long)req32.value,
28961 &request->value))
28962 return -EFAULT;
28963
28964 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28965 index 00da384..32f972d 100644
28966 --- a/drivers/gpu/drm/radeon/radeon_irq.c
28967 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
28968 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28969 unsigned int ret;
28970 RING_LOCALS;
28971
28972 - atomic_inc(&dev_priv->swi_emitted);
28973 - ret = atomic_read(&dev_priv->swi_emitted);
28974 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28975 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28976
28977 BEGIN_RING(4);
28978 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28979 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28980 drm_radeon_private_t *dev_priv =
28981 (drm_radeon_private_t *) dev->dev_private;
28982
28983 - atomic_set(&dev_priv->swi_emitted, 0);
28984 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28985 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28986
28987 dev->max_vblank_count = 0x001fffff;
28988 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28989 index e8422ae..d22d4a8 100644
28990 --- a/drivers/gpu/drm/radeon/radeon_state.c
28991 +++ b/drivers/gpu/drm/radeon/radeon_state.c
28992 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
28993 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28994 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28995
28996 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28997 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28998 sarea_priv->nbox * sizeof(depth_boxes[0])))
28999 return -EFAULT;
29000
29001 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
29002 {
29003 drm_radeon_private_t *dev_priv = dev->dev_private;
29004 drm_radeon_getparam_t *param = data;
29005 - int value;
29006 + int value = 0;
29007
29008 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
29009
29010 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
29011 index 0b5468b..9c4b308 100644
29012 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
29013 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
29014 @@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
29015 }
29016 if (unlikely(ttm_vm_ops == NULL)) {
29017 ttm_vm_ops = vma->vm_ops;
29018 - radeon_ttm_vm_ops = *ttm_vm_ops;
29019 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29020 + pax_open_kernel();
29021 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
29022 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29023 + pax_close_kernel();
29024 }
29025 vma->vm_ops = &radeon_ttm_vm_ops;
29026 return 0;
29027 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
29028 index a9049ed..501f284 100644
29029 --- a/drivers/gpu/drm/radeon/rs690.c
29030 +++ b/drivers/gpu/drm/radeon/rs690.c
29031 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
29032 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
29033 rdev->pm.sideport_bandwidth.full)
29034 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
29035 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
29036 + read_delay_latency.full = dfixed_const(800 * 1000);
29037 read_delay_latency.full = dfixed_div(read_delay_latency,
29038 rdev->pm.igp_sideport_mclk);
29039 + a.full = dfixed_const(370);
29040 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
29041 } else {
29042 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
29043 rdev->pm.k8_bandwidth.full)
29044 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
29045 index 727e93d..1565650 100644
29046 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
29047 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
29048 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
29049 static int ttm_pool_mm_shrink(struct shrinker *shrink,
29050 struct shrink_control *sc)
29051 {
29052 - static atomic_t start_pool = ATOMIC_INIT(0);
29053 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
29054 unsigned i;
29055 - unsigned pool_offset = atomic_add_return(1, &start_pool);
29056 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
29057 struct ttm_page_pool *pool;
29058 int shrink_pages = sc->nr_to_scan;
29059
29060 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
29061 index 9cf87d9..2000b7d 100644
29062 --- a/drivers/gpu/drm/via/via_drv.h
29063 +++ b/drivers/gpu/drm/via/via_drv.h
29064 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
29065 typedef uint32_t maskarray_t[5];
29066
29067 typedef struct drm_via_irq {
29068 - atomic_t irq_received;
29069 + atomic_unchecked_t irq_received;
29070 uint32_t pending_mask;
29071 uint32_t enable_mask;
29072 wait_queue_head_t irq_queue;
29073 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
29074 struct timeval last_vblank;
29075 int last_vblank_valid;
29076 unsigned usec_per_vblank;
29077 - atomic_t vbl_received;
29078 + atomic_unchecked_t vbl_received;
29079 drm_via_state_t hc_state;
29080 char pci_buf[VIA_PCI_BUF_SIZE];
29081 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
29082 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
29083 index d391f48..10c8ca3 100644
29084 --- a/drivers/gpu/drm/via/via_irq.c
29085 +++ b/drivers/gpu/drm/via/via_irq.c
29086 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
29087 if (crtc != 0)
29088 return 0;
29089
29090 - return atomic_read(&dev_priv->vbl_received);
29091 + return atomic_read_unchecked(&dev_priv->vbl_received);
29092 }
29093
29094 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29095 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29096
29097 status = VIA_READ(VIA_REG_INTERRUPT);
29098 if (status & VIA_IRQ_VBLANK_PENDING) {
29099 - atomic_inc(&dev_priv->vbl_received);
29100 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
29101 + atomic_inc_unchecked(&dev_priv->vbl_received);
29102 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
29103 do_gettimeofday(&cur_vblank);
29104 if (dev_priv->last_vblank_valid) {
29105 dev_priv->usec_per_vblank =
29106 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29107 dev_priv->last_vblank = cur_vblank;
29108 dev_priv->last_vblank_valid = 1;
29109 }
29110 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
29111 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
29112 DRM_DEBUG("US per vblank is: %u\n",
29113 dev_priv->usec_per_vblank);
29114 }
29115 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29116
29117 for (i = 0; i < dev_priv->num_irqs; ++i) {
29118 if (status & cur_irq->pending_mask) {
29119 - atomic_inc(&cur_irq->irq_received);
29120 + atomic_inc_unchecked(&cur_irq->irq_received);
29121 DRM_WAKEUP(&cur_irq->irq_queue);
29122 handled = 1;
29123 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
29124 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
29125 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29126 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
29127 masks[irq][4]));
29128 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
29129 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
29130 } else {
29131 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29132 (((cur_irq_sequence =
29133 - atomic_read(&cur_irq->irq_received)) -
29134 + atomic_read_unchecked(&cur_irq->irq_received)) -
29135 *sequence) <= (1 << 23)));
29136 }
29137 *sequence = cur_irq_sequence;
29138 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
29139 }
29140
29141 for (i = 0; i < dev_priv->num_irqs; ++i) {
29142 - atomic_set(&cur_irq->irq_received, 0);
29143 + atomic_set_unchecked(&cur_irq->irq_received, 0);
29144 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
29145 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
29146 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
29147 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
29148 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
29149 case VIA_IRQ_RELATIVE:
29150 irqwait->request.sequence +=
29151 - atomic_read(&cur_irq->irq_received);
29152 + atomic_read_unchecked(&cur_irq->irq_received);
29153 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
29154 case VIA_IRQ_ABSOLUTE:
29155 break;
29156 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29157 index dc27970..f18b008 100644
29158 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29159 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29160 @@ -260,7 +260,7 @@ struct vmw_private {
29161 * Fencing and IRQs.
29162 */
29163
29164 - atomic_t marker_seq;
29165 + atomic_unchecked_t marker_seq;
29166 wait_queue_head_t fence_queue;
29167 wait_queue_head_t fifo_queue;
29168 int fence_queue_waiters; /* Protected by hw_mutex */
29169 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29170 index a0c2f12..68ae6cb 100644
29171 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29172 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29173 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
29174 (unsigned int) min,
29175 (unsigned int) fifo->capabilities);
29176
29177 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
29178 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
29179 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
29180 vmw_marker_queue_init(&fifo->marker_queue);
29181 return vmw_fifo_send_fence(dev_priv, &dummy);
29182 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
29183 if (reserveable)
29184 iowrite32(bytes, fifo_mem +
29185 SVGA_FIFO_RESERVED);
29186 - return fifo_mem + (next_cmd >> 2);
29187 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
29188 } else {
29189 need_bounce = true;
29190 }
29191 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29192
29193 fm = vmw_fifo_reserve(dev_priv, bytes);
29194 if (unlikely(fm == NULL)) {
29195 - *seqno = atomic_read(&dev_priv->marker_seq);
29196 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29197 ret = -ENOMEM;
29198 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
29199 false, 3*HZ);
29200 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29201 }
29202
29203 do {
29204 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
29205 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
29206 } while (*seqno == 0);
29207
29208 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
29209 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29210 index cabc95f..14b3d77 100644
29211 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29212 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29213 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
29214 * emitted. Then the fence is stale and signaled.
29215 */
29216
29217 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
29218 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
29219 > VMW_FENCE_WRAP);
29220
29221 return ret;
29222 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
29223
29224 if (fifo_idle)
29225 down_read(&fifo_state->rwsem);
29226 - signal_seq = atomic_read(&dev_priv->marker_seq);
29227 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
29228 ret = 0;
29229
29230 for (;;) {
29231 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29232 index 8a8725c..afed796 100644
29233 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29234 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29235 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
29236 while (!vmw_lag_lt(queue, us)) {
29237 spin_lock(&queue->lock);
29238 if (list_empty(&queue->head))
29239 - seqno = atomic_read(&dev_priv->marker_seq);
29240 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29241 else {
29242 marker = list_first_entry(&queue->head,
29243 struct vmw_marker, head);
29244 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
29245 index bb656d8..4169fca 100644
29246 --- a/drivers/hid/hid-core.c
29247 +++ b/drivers/hid/hid-core.c
29248 @@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev)
29249
29250 int hid_add_device(struct hid_device *hdev)
29251 {
29252 - static atomic_t id = ATOMIC_INIT(0);
29253 + static atomic_unchecked_t id = ATOMIC_INIT(0);
29254 int ret;
29255
29256 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29257 @@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev)
29258 /* XXX hack, any other cleaner solution after the driver core
29259 * is converted to allow more than 20 bytes as the device name? */
29260 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29261 - hdev->vendor, hdev->product, atomic_inc_return(&id));
29262 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29263
29264 hid_debug_register(hdev, dev_name(&hdev->dev));
29265 ret = device_add(&hdev->dev);
29266 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
29267 index 4ef02b2..8a96831 100644
29268 --- a/drivers/hid/usbhid/hiddev.c
29269 +++ b/drivers/hid/usbhid/hiddev.c
29270 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
29271 break;
29272
29273 case HIDIOCAPPLICATION:
29274 - if (arg < 0 || arg >= hid->maxapplication)
29275 + if (arg >= hid->maxapplication)
29276 break;
29277
29278 for (i = 0; i < hid->maxcollection; i++)
29279 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
29280 index 4065374..10ed7dc 100644
29281 --- a/drivers/hv/channel.c
29282 +++ b/drivers/hv/channel.c
29283 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
29284 int ret = 0;
29285 int t;
29286
29287 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
29288 - atomic_inc(&vmbus_connection.next_gpadl_handle);
29289 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
29290 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
29291
29292 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
29293 if (ret)
29294 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
29295 index 0fb100e..baf87e5 100644
29296 --- a/drivers/hv/hv.c
29297 +++ b/drivers/hv/hv.c
29298 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
29299 u64 output_address = (output) ? virt_to_phys(output) : 0;
29300 u32 output_address_hi = output_address >> 32;
29301 u32 output_address_lo = output_address & 0xFFFFFFFF;
29302 - void *hypercall_page = hv_context.hypercall_page;
29303 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
29304
29305 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
29306 "=a"(hv_status_lo) : "d" (control_hi),
29307 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
29308 index 0aee112..b72d21f 100644
29309 --- a/drivers/hv/hyperv_vmbus.h
29310 +++ b/drivers/hv/hyperv_vmbus.h
29311 @@ -556,7 +556,7 @@ enum vmbus_connect_state {
29312 struct vmbus_connection {
29313 enum vmbus_connect_state conn_state;
29314
29315 - atomic_t next_gpadl_handle;
29316 + atomic_unchecked_t next_gpadl_handle;
29317
29318 /*
29319 * Represents channel interrupts. Each bit position represents a
29320 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
29321 index d2d0a2a..90b8f4d 100644
29322 --- a/drivers/hv/vmbus_drv.c
29323 +++ b/drivers/hv/vmbus_drv.c
29324 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
29325 {
29326 int ret = 0;
29327
29328 - static atomic_t device_num = ATOMIC_INIT(0);
29329 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
29330
29331 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
29332 - atomic_inc_return(&device_num));
29333 + atomic_inc_return_unchecked(&device_num));
29334
29335 child_device_obj->device.bus = &hv_bus;
29336 child_device_obj->device.parent = &hv_acpi_dev->dev;
29337 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29338 index 66f6729..2d6de0a 100644
29339 --- a/drivers/hwmon/acpi_power_meter.c
29340 +++ b/drivers/hwmon/acpi_power_meter.c
29341 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29342 return res;
29343
29344 temp /= 1000;
29345 - if (temp < 0)
29346 - return -EINVAL;
29347
29348 mutex_lock(&resource->lock);
29349 resource->trip[attr->index - 7] = temp;
29350 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29351 index 5357925..6cf0418 100644
29352 --- a/drivers/hwmon/sht15.c
29353 +++ b/drivers/hwmon/sht15.c
29354 @@ -166,7 +166,7 @@ struct sht15_data {
29355 int supply_uV;
29356 bool supply_uV_valid;
29357 struct work_struct update_supply_work;
29358 - atomic_t interrupt_handled;
29359 + atomic_unchecked_t interrupt_handled;
29360 };
29361
29362 /**
29363 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29364 return ret;
29365
29366 gpio_direction_input(data->pdata->gpio_data);
29367 - atomic_set(&data->interrupt_handled, 0);
29368 + atomic_set_unchecked(&data->interrupt_handled, 0);
29369
29370 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29371 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29372 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29373 /* Only relevant if the interrupt hasn't occurred. */
29374 - if (!atomic_read(&data->interrupt_handled))
29375 + if (!atomic_read_unchecked(&data->interrupt_handled))
29376 schedule_work(&data->read_work);
29377 }
29378 ret = wait_event_timeout(data->wait_queue,
29379 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29380
29381 /* First disable the interrupt */
29382 disable_irq_nosync(irq);
29383 - atomic_inc(&data->interrupt_handled);
29384 + atomic_inc_unchecked(&data->interrupt_handled);
29385 /* Then schedule a reading work struct */
29386 if (data->state != SHT15_READING_NOTHING)
29387 schedule_work(&data->read_work);
29388 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29389 * If not, then start the interrupt again - care here as could
29390 * have gone low in meantime so verify it hasn't!
29391 */
29392 - atomic_set(&data->interrupt_handled, 0);
29393 + atomic_set_unchecked(&data->interrupt_handled, 0);
29394 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29395 /* If still not occurred or another handler has been scheduled */
29396 if (gpio_get_value(data->pdata->gpio_data)
29397 - || atomic_read(&data->interrupt_handled))
29398 + || atomic_read_unchecked(&data->interrupt_handled))
29399 return;
29400 }
29401
29402 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29403 index 378fcb5..5e91fa8 100644
29404 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
29405 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29406 @@ -43,7 +43,7 @@
29407 extern struct i2c_adapter amd756_smbus;
29408
29409 static struct i2c_adapter *s4882_adapter;
29410 -static struct i2c_algorithm *s4882_algo;
29411 +static i2c_algorithm_no_const *s4882_algo;
29412
29413 /* Wrapper access functions for multiplexed SMBus */
29414 static DEFINE_MUTEX(amd756_lock);
29415 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29416 index 29015eb..af2d8e9 100644
29417 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29418 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29419 @@ -41,7 +41,7 @@
29420 extern struct i2c_adapter *nforce2_smbus;
29421
29422 static struct i2c_adapter *s4985_adapter;
29423 -static struct i2c_algorithm *s4985_algo;
29424 +static i2c_algorithm_no_const *s4985_algo;
29425
29426 /* Wrapper access functions for multiplexed SMBus */
29427 static DEFINE_MUTEX(nforce2_lock);
29428 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29429 index d7a4833..7fae376 100644
29430 --- a/drivers/i2c/i2c-mux.c
29431 +++ b/drivers/i2c/i2c-mux.c
29432 @@ -28,7 +28,7 @@
29433 /* multiplexer per channel data */
29434 struct i2c_mux_priv {
29435 struct i2c_adapter adap;
29436 - struct i2c_algorithm algo;
29437 + i2c_algorithm_no_const algo;
29438
29439 struct i2c_adapter *parent;
29440 void *mux_dev; /* the mux chip/device */
29441 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29442 index 57d00ca..0145194 100644
29443 --- a/drivers/ide/aec62xx.c
29444 +++ b/drivers/ide/aec62xx.c
29445 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29446 .cable_detect = atp86x_cable_detect,
29447 };
29448
29449 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29450 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29451 { /* 0: AEC6210 */
29452 .name = DRV_NAME,
29453 .init_chipset = init_chipset_aec62xx,
29454 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29455 index 2c8016a..911a27c 100644
29456 --- a/drivers/ide/alim15x3.c
29457 +++ b/drivers/ide/alim15x3.c
29458 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29459 .dma_sff_read_status = ide_dma_sff_read_status,
29460 };
29461
29462 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
29463 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
29464 .name = DRV_NAME,
29465 .init_chipset = init_chipset_ali15x3,
29466 .init_hwif = init_hwif_ali15x3,
29467 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29468 index 3747b25..56fc995 100644
29469 --- a/drivers/ide/amd74xx.c
29470 +++ b/drivers/ide/amd74xx.c
29471 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29472 .udma_mask = udma, \
29473 }
29474
29475 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29476 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29477 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29478 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29479 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29480 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29481 index 15f0ead..cb43480 100644
29482 --- a/drivers/ide/atiixp.c
29483 +++ b/drivers/ide/atiixp.c
29484 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29485 .cable_detect = atiixp_cable_detect,
29486 };
29487
29488 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29489 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29490 { /* 0: IXP200/300/400/700 */
29491 .name = DRV_NAME,
29492 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29493 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29494 index 5f80312..d1fc438 100644
29495 --- a/drivers/ide/cmd64x.c
29496 +++ b/drivers/ide/cmd64x.c
29497 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29498 .dma_sff_read_status = ide_dma_sff_read_status,
29499 };
29500
29501 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29502 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29503 { /* 0: CMD643 */
29504 .name = DRV_NAME,
29505 .init_chipset = init_chipset_cmd64x,
29506 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29507 index 2c1e5f7..1444762 100644
29508 --- a/drivers/ide/cs5520.c
29509 +++ b/drivers/ide/cs5520.c
29510 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29511 .set_dma_mode = cs5520_set_dma_mode,
29512 };
29513
29514 -static const struct ide_port_info cyrix_chipset __devinitdata = {
29515 +static const struct ide_port_info cyrix_chipset __devinitconst = {
29516 .name = DRV_NAME,
29517 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29518 .port_ops = &cs5520_port_ops,
29519 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29520 index 4dc4eb9..49b40ad 100644
29521 --- a/drivers/ide/cs5530.c
29522 +++ b/drivers/ide/cs5530.c
29523 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29524 .udma_filter = cs5530_udma_filter,
29525 };
29526
29527 -static const struct ide_port_info cs5530_chipset __devinitdata = {
29528 +static const struct ide_port_info cs5530_chipset __devinitconst = {
29529 .name = DRV_NAME,
29530 .init_chipset = init_chipset_cs5530,
29531 .init_hwif = init_hwif_cs5530,
29532 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29533 index 5059faf..18d4c85 100644
29534 --- a/drivers/ide/cs5535.c
29535 +++ b/drivers/ide/cs5535.c
29536 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29537 .cable_detect = cs5535_cable_detect,
29538 };
29539
29540 -static const struct ide_port_info cs5535_chipset __devinitdata = {
29541 +static const struct ide_port_info cs5535_chipset __devinitconst = {
29542 .name = DRV_NAME,
29543 .port_ops = &cs5535_port_ops,
29544 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29545 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29546 index 847553f..3ffb49d 100644
29547 --- a/drivers/ide/cy82c693.c
29548 +++ b/drivers/ide/cy82c693.c
29549 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29550 .set_dma_mode = cy82c693_set_dma_mode,
29551 };
29552
29553 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
29554 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
29555 .name = DRV_NAME,
29556 .init_iops = init_iops_cy82c693,
29557 .port_ops = &cy82c693_port_ops,
29558 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29559 index 58c51cd..4aec3b8 100644
29560 --- a/drivers/ide/hpt366.c
29561 +++ b/drivers/ide/hpt366.c
29562 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29563 }
29564 };
29565
29566 -static const struct hpt_info hpt36x __devinitdata = {
29567 +static const struct hpt_info hpt36x __devinitconst = {
29568 .chip_name = "HPT36x",
29569 .chip_type = HPT36x,
29570 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29571 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29572 .timings = &hpt36x_timings
29573 };
29574
29575 -static const struct hpt_info hpt370 __devinitdata = {
29576 +static const struct hpt_info hpt370 __devinitconst = {
29577 .chip_name = "HPT370",
29578 .chip_type = HPT370,
29579 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29580 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29581 .timings = &hpt37x_timings
29582 };
29583
29584 -static const struct hpt_info hpt370a __devinitdata = {
29585 +static const struct hpt_info hpt370a __devinitconst = {
29586 .chip_name = "HPT370A",
29587 .chip_type = HPT370A,
29588 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29589 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29590 .timings = &hpt37x_timings
29591 };
29592
29593 -static const struct hpt_info hpt374 __devinitdata = {
29594 +static const struct hpt_info hpt374 __devinitconst = {
29595 .chip_name = "HPT374",
29596 .chip_type = HPT374,
29597 .udma_mask = ATA_UDMA5,
29598 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29599 .timings = &hpt37x_timings
29600 };
29601
29602 -static const struct hpt_info hpt372 __devinitdata = {
29603 +static const struct hpt_info hpt372 __devinitconst = {
29604 .chip_name = "HPT372",
29605 .chip_type = HPT372,
29606 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29607 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29608 .timings = &hpt37x_timings
29609 };
29610
29611 -static const struct hpt_info hpt372a __devinitdata = {
29612 +static const struct hpt_info hpt372a __devinitconst = {
29613 .chip_name = "HPT372A",
29614 .chip_type = HPT372A,
29615 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29616 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29617 .timings = &hpt37x_timings
29618 };
29619
29620 -static const struct hpt_info hpt302 __devinitdata = {
29621 +static const struct hpt_info hpt302 __devinitconst = {
29622 .chip_name = "HPT302",
29623 .chip_type = HPT302,
29624 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29625 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29626 .timings = &hpt37x_timings
29627 };
29628
29629 -static const struct hpt_info hpt371 __devinitdata = {
29630 +static const struct hpt_info hpt371 __devinitconst = {
29631 .chip_name = "HPT371",
29632 .chip_type = HPT371,
29633 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29634 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29635 .timings = &hpt37x_timings
29636 };
29637
29638 -static const struct hpt_info hpt372n __devinitdata = {
29639 +static const struct hpt_info hpt372n __devinitconst = {
29640 .chip_name = "HPT372N",
29641 .chip_type = HPT372N,
29642 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29643 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29644 .timings = &hpt37x_timings
29645 };
29646
29647 -static const struct hpt_info hpt302n __devinitdata = {
29648 +static const struct hpt_info hpt302n __devinitconst = {
29649 .chip_name = "HPT302N",
29650 .chip_type = HPT302N,
29651 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29652 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29653 .timings = &hpt37x_timings
29654 };
29655
29656 -static const struct hpt_info hpt371n __devinitdata = {
29657 +static const struct hpt_info hpt371n __devinitconst = {
29658 .chip_name = "HPT371N",
29659 .chip_type = HPT371N,
29660 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29661 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29662 .dma_sff_read_status = ide_dma_sff_read_status,
29663 };
29664
29665 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29666 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29667 { /* 0: HPT36x */
29668 .name = DRV_NAME,
29669 .init_chipset = init_chipset_hpt366,
29670 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29671 index 8126824..55a2798 100644
29672 --- a/drivers/ide/ide-cd.c
29673 +++ b/drivers/ide/ide-cd.c
29674 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29675 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29676 if ((unsigned long)buf & alignment
29677 || blk_rq_bytes(rq) & q->dma_pad_mask
29678 - || object_is_on_stack(buf))
29679 + || object_starts_on_stack(buf))
29680 drive->dma = 0;
29681 }
29682 }
29683 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29684 index a743e68..1cfd674 100644
29685 --- a/drivers/ide/ide-pci-generic.c
29686 +++ b/drivers/ide/ide-pci-generic.c
29687 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29688 .udma_mask = ATA_UDMA6, \
29689 }
29690
29691 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
29692 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
29693 /* 0: Unknown */
29694 DECLARE_GENERIC_PCI_DEV(0),
29695
29696 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29697 index 560e66d..d5dd180 100644
29698 --- a/drivers/ide/it8172.c
29699 +++ b/drivers/ide/it8172.c
29700 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29701 .set_dma_mode = it8172_set_dma_mode,
29702 };
29703
29704 -static const struct ide_port_info it8172_port_info __devinitdata = {
29705 +static const struct ide_port_info it8172_port_info __devinitconst = {
29706 .name = DRV_NAME,
29707 .port_ops = &it8172_port_ops,
29708 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29709 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29710 index 46816ba..1847aeb 100644
29711 --- a/drivers/ide/it8213.c
29712 +++ b/drivers/ide/it8213.c
29713 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29714 .cable_detect = it8213_cable_detect,
29715 };
29716
29717 -static const struct ide_port_info it8213_chipset __devinitdata = {
29718 +static const struct ide_port_info it8213_chipset __devinitconst = {
29719 .name = DRV_NAME,
29720 .enablebits = { {0x41, 0x80, 0x80} },
29721 .port_ops = &it8213_port_ops,
29722 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29723 index 2e3169f..c5611db 100644
29724 --- a/drivers/ide/it821x.c
29725 +++ b/drivers/ide/it821x.c
29726 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29727 .cable_detect = it821x_cable_detect,
29728 };
29729
29730 -static const struct ide_port_info it821x_chipset __devinitdata = {
29731 +static const struct ide_port_info it821x_chipset __devinitconst = {
29732 .name = DRV_NAME,
29733 .init_chipset = init_chipset_it821x,
29734 .init_hwif = init_hwif_it821x,
29735 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29736 index 74c2c4a..efddd7d 100644
29737 --- a/drivers/ide/jmicron.c
29738 +++ b/drivers/ide/jmicron.c
29739 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29740 .cable_detect = jmicron_cable_detect,
29741 };
29742
29743 -static const struct ide_port_info jmicron_chipset __devinitdata = {
29744 +static const struct ide_port_info jmicron_chipset __devinitconst = {
29745 .name = DRV_NAME,
29746 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29747 .port_ops = &jmicron_port_ops,
29748 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29749 index 95327a2..73f78d8 100644
29750 --- a/drivers/ide/ns87415.c
29751 +++ b/drivers/ide/ns87415.c
29752 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29753 .dma_sff_read_status = superio_dma_sff_read_status,
29754 };
29755
29756 -static const struct ide_port_info ns87415_chipset __devinitdata = {
29757 +static const struct ide_port_info ns87415_chipset __devinitconst = {
29758 .name = DRV_NAME,
29759 .init_hwif = init_hwif_ns87415,
29760 .tp_ops = &ns87415_tp_ops,
29761 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29762 index 1a53a4c..39edc66 100644
29763 --- a/drivers/ide/opti621.c
29764 +++ b/drivers/ide/opti621.c
29765 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29766 .set_pio_mode = opti621_set_pio_mode,
29767 };
29768
29769 -static const struct ide_port_info opti621_chipset __devinitdata = {
29770 +static const struct ide_port_info opti621_chipset __devinitconst = {
29771 .name = DRV_NAME,
29772 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29773 .port_ops = &opti621_port_ops,
29774 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29775 index 9546fe2..2e5ceb6 100644
29776 --- a/drivers/ide/pdc202xx_new.c
29777 +++ b/drivers/ide/pdc202xx_new.c
29778 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29779 .udma_mask = udma, \
29780 }
29781
29782 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29783 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29784 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29785 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29786 };
29787 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29788 index 3a35ec6..5634510 100644
29789 --- a/drivers/ide/pdc202xx_old.c
29790 +++ b/drivers/ide/pdc202xx_old.c
29791 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29792 .max_sectors = sectors, \
29793 }
29794
29795 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29796 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29797 { /* 0: PDC20246 */
29798 .name = DRV_NAME,
29799 .init_chipset = init_chipset_pdc202xx,
29800 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29801 index 1892e81..fe0fd60 100644
29802 --- a/drivers/ide/piix.c
29803 +++ b/drivers/ide/piix.c
29804 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29805 .udma_mask = udma, \
29806 }
29807
29808 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
29809 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
29810 /* 0: MPIIX */
29811 { /*
29812 * MPIIX actually has only a single IDE channel mapped to
29813 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29814 index a6414a8..c04173e 100644
29815 --- a/drivers/ide/rz1000.c
29816 +++ b/drivers/ide/rz1000.c
29817 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29818 }
29819 }
29820
29821 -static const struct ide_port_info rz1000_chipset __devinitdata = {
29822 +static const struct ide_port_info rz1000_chipset __devinitconst = {
29823 .name = DRV_NAME,
29824 .host_flags = IDE_HFLAG_NO_DMA,
29825 };
29826 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29827 index 356b9b5..d4758eb 100644
29828 --- a/drivers/ide/sc1200.c
29829 +++ b/drivers/ide/sc1200.c
29830 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29831 .dma_sff_read_status = ide_dma_sff_read_status,
29832 };
29833
29834 -static const struct ide_port_info sc1200_chipset __devinitdata = {
29835 +static const struct ide_port_info sc1200_chipset __devinitconst = {
29836 .name = DRV_NAME,
29837 .port_ops = &sc1200_port_ops,
29838 .dma_ops = &sc1200_dma_ops,
29839 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29840 index b7f5b0c..9701038 100644
29841 --- a/drivers/ide/scc_pata.c
29842 +++ b/drivers/ide/scc_pata.c
29843 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29844 .dma_sff_read_status = scc_dma_sff_read_status,
29845 };
29846
29847 -static const struct ide_port_info scc_chipset __devinitdata = {
29848 +static const struct ide_port_info scc_chipset __devinitconst = {
29849 .name = "sccIDE",
29850 .init_iops = init_iops_scc,
29851 .init_dma = scc_init_dma,
29852 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29853 index 35fb8da..24d72ef 100644
29854 --- a/drivers/ide/serverworks.c
29855 +++ b/drivers/ide/serverworks.c
29856 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29857 .cable_detect = svwks_cable_detect,
29858 };
29859
29860 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29861 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29862 { /* 0: OSB4 */
29863 .name = DRV_NAME,
29864 .init_chipset = init_chipset_svwks,
29865 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29866 index ddeda44..46f7e30 100644
29867 --- a/drivers/ide/siimage.c
29868 +++ b/drivers/ide/siimage.c
29869 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29870 .udma_mask = ATA_UDMA6, \
29871 }
29872
29873 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29874 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29875 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29876 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29877 };
29878 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29879 index 4a00225..09e61b4 100644
29880 --- a/drivers/ide/sis5513.c
29881 +++ b/drivers/ide/sis5513.c
29882 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29883 .cable_detect = sis_cable_detect,
29884 };
29885
29886 -static const struct ide_port_info sis5513_chipset __devinitdata = {
29887 +static const struct ide_port_info sis5513_chipset __devinitconst = {
29888 .name = DRV_NAME,
29889 .init_chipset = init_chipset_sis5513,
29890 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29891 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29892 index f21dc2a..d051cd2 100644
29893 --- a/drivers/ide/sl82c105.c
29894 +++ b/drivers/ide/sl82c105.c
29895 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29896 .dma_sff_read_status = ide_dma_sff_read_status,
29897 };
29898
29899 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
29900 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
29901 .name = DRV_NAME,
29902 .init_chipset = init_chipset_sl82c105,
29903 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29904 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29905 index 864ffe0..863a5e9 100644
29906 --- a/drivers/ide/slc90e66.c
29907 +++ b/drivers/ide/slc90e66.c
29908 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29909 .cable_detect = slc90e66_cable_detect,
29910 };
29911
29912 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
29913 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
29914 .name = DRV_NAME,
29915 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29916 .port_ops = &slc90e66_port_ops,
29917 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29918 index 4799d5c..1794678 100644
29919 --- a/drivers/ide/tc86c001.c
29920 +++ b/drivers/ide/tc86c001.c
29921 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29922 .dma_sff_read_status = ide_dma_sff_read_status,
29923 };
29924
29925 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
29926 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
29927 .name = DRV_NAME,
29928 .init_hwif = init_hwif_tc86c001,
29929 .port_ops = &tc86c001_port_ops,
29930 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29931 index 281c914..55ce1b8 100644
29932 --- a/drivers/ide/triflex.c
29933 +++ b/drivers/ide/triflex.c
29934 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29935 .set_dma_mode = triflex_set_mode,
29936 };
29937
29938 -static const struct ide_port_info triflex_device __devinitdata = {
29939 +static const struct ide_port_info triflex_device __devinitconst = {
29940 .name = DRV_NAME,
29941 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29942 .port_ops = &triflex_port_ops,
29943 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29944 index 4b42ca0..e494a98 100644
29945 --- a/drivers/ide/trm290.c
29946 +++ b/drivers/ide/trm290.c
29947 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29948 .dma_check = trm290_dma_check,
29949 };
29950
29951 -static const struct ide_port_info trm290_chipset __devinitdata = {
29952 +static const struct ide_port_info trm290_chipset __devinitconst = {
29953 .name = DRV_NAME,
29954 .init_hwif = init_hwif_trm290,
29955 .tp_ops = &trm290_tp_ops,
29956 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29957 index f46f49c..eb77678 100644
29958 --- a/drivers/ide/via82cxxx.c
29959 +++ b/drivers/ide/via82cxxx.c
29960 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29961 .cable_detect = via82cxxx_cable_detect,
29962 };
29963
29964 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29965 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29966 .name = DRV_NAME,
29967 .init_chipset = init_chipset_via82cxxx,
29968 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29969 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
29970 index eb0e2cc..14241c7 100644
29971 --- a/drivers/ieee802154/fakehard.c
29972 +++ b/drivers/ieee802154/fakehard.c
29973 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
29974 phy->transmit_power = 0xbf;
29975
29976 dev->netdev_ops = &fake_ops;
29977 - dev->ml_priv = &fake_mlme;
29978 + dev->ml_priv = (void *)&fake_mlme;
29979
29980 priv = netdev_priv(dev);
29981 priv->phy = phy;
29982 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29983 index 8b72f39..55df4c8 100644
29984 --- a/drivers/infiniband/core/cm.c
29985 +++ b/drivers/infiniband/core/cm.c
29986 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29987
29988 struct cm_counter_group {
29989 struct kobject obj;
29990 - atomic_long_t counter[CM_ATTR_COUNT];
29991 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29992 };
29993
29994 struct cm_counter_attribute {
29995 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
29996 struct ib_mad_send_buf *msg = NULL;
29997 int ret;
29998
29999 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30000 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30001 counter[CM_REQ_COUNTER]);
30002
30003 /* Quick state check to discard duplicate REQs. */
30004 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
30005 if (!cm_id_priv)
30006 return;
30007
30008 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30009 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30010 counter[CM_REP_COUNTER]);
30011 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30012 if (ret)
30013 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
30014 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30015 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30016 spin_unlock_irq(&cm_id_priv->lock);
30017 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30018 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30019 counter[CM_RTU_COUNTER]);
30020 goto out;
30021 }
30022 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
30023 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30024 dreq_msg->local_comm_id);
30025 if (!cm_id_priv) {
30026 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30027 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30028 counter[CM_DREQ_COUNTER]);
30029 cm_issue_drep(work->port, work->mad_recv_wc);
30030 return -EINVAL;
30031 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
30032 case IB_CM_MRA_REP_RCVD:
30033 break;
30034 case IB_CM_TIMEWAIT:
30035 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30036 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30037 counter[CM_DREQ_COUNTER]);
30038 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30039 goto unlock;
30040 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
30041 cm_free_msg(msg);
30042 goto deref;
30043 case IB_CM_DREQ_RCVD:
30044 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30045 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30046 counter[CM_DREQ_COUNTER]);
30047 goto unlock;
30048 default:
30049 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
30050 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30051 cm_id_priv->msg, timeout)) {
30052 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30053 - atomic_long_inc(&work->port->
30054 + atomic_long_inc_unchecked(&work->port->
30055 counter_group[CM_RECV_DUPLICATES].
30056 counter[CM_MRA_COUNTER]);
30057 goto out;
30058 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
30059 break;
30060 case IB_CM_MRA_REQ_RCVD:
30061 case IB_CM_MRA_REP_RCVD:
30062 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30063 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30064 counter[CM_MRA_COUNTER]);
30065 /* fall through */
30066 default:
30067 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
30068 case IB_CM_LAP_IDLE:
30069 break;
30070 case IB_CM_MRA_LAP_SENT:
30071 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30072 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30073 counter[CM_LAP_COUNTER]);
30074 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30075 goto unlock;
30076 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
30077 cm_free_msg(msg);
30078 goto deref;
30079 case IB_CM_LAP_RCVD:
30080 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30081 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30082 counter[CM_LAP_COUNTER]);
30083 goto unlock;
30084 default:
30085 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
30086 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30087 if (cur_cm_id_priv) {
30088 spin_unlock_irq(&cm.lock);
30089 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30090 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30091 counter[CM_SIDR_REQ_COUNTER]);
30092 goto out; /* Duplicate message. */
30093 }
30094 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
30095 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30096 msg->retries = 1;
30097
30098 - atomic_long_add(1 + msg->retries,
30099 + atomic_long_add_unchecked(1 + msg->retries,
30100 &port->counter_group[CM_XMIT].counter[attr_index]);
30101 if (msg->retries)
30102 - atomic_long_add(msg->retries,
30103 + atomic_long_add_unchecked(msg->retries,
30104 &port->counter_group[CM_XMIT_RETRIES].
30105 counter[attr_index]);
30106
30107 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
30108 }
30109
30110 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30111 - atomic_long_inc(&port->counter_group[CM_RECV].
30112 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30113 counter[attr_id - CM_ATTR_ID_OFFSET]);
30114
30115 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30116 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
30117 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30118
30119 return sprintf(buf, "%ld\n",
30120 - atomic_long_read(&group->counter[cm_attr->index]));
30121 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30122 }
30123
30124 static const struct sysfs_ops cm_counter_ops = {
30125 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
30126 index 176c8f9..2627b62 100644
30127 --- a/drivers/infiniband/core/fmr_pool.c
30128 +++ b/drivers/infiniband/core/fmr_pool.c
30129 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
30130
30131 struct task_struct *thread;
30132
30133 - atomic_t req_ser;
30134 - atomic_t flush_ser;
30135 + atomic_unchecked_t req_ser;
30136 + atomic_unchecked_t flush_ser;
30137
30138 wait_queue_head_t force_wait;
30139 };
30140 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
30141 struct ib_fmr_pool *pool = pool_ptr;
30142
30143 do {
30144 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30145 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30146 ib_fmr_batch_release(pool);
30147
30148 - atomic_inc(&pool->flush_ser);
30149 + atomic_inc_unchecked(&pool->flush_ser);
30150 wake_up_interruptible(&pool->force_wait);
30151
30152 if (pool->flush_function)
30153 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
30154 }
30155
30156 set_current_state(TASK_INTERRUPTIBLE);
30157 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30158 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30159 !kthread_should_stop())
30160 schedule();
30161 __set_current_state(TASK_RUNNING);
30162 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
30163 pool->dirty_watermark = params->dirty_watermark;
30164 pool->dirty_len = 0;
30165 spin_lock_init(&pool->pool_lock);
30166 - atomic_set(&pool->req_ser, 0);
30167 - atomic_set(&pool->flush_ser, 0);
30168 + atomic_set_unchecked(&pool->req_ser, 0);
30169 + atomic_set_unchecked(&pool->flush_ser, 0);
30170 init_waitqueue_head(&pool->force_wait);
30171
30172 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30173 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
30174 }
30175 spin_unlock_irq(&pool->pool_lock);
30176
30177 - serial = atomic_inc_return(&pool->req_ser);
30178 + serial = atomic_inc_return_unchecked(&pool->req_ser);
30179 wake_up_process(pool->thread);
30180
30181 if (wait_event_interruptible(pool->force_wait,
30182 - atomic_read(&pool->flush_ser) - serial >= 0))
30183 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30184 return -EINTR;
30185
30186 return 0;
30187 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
30188 } else {
30189 list_add_tail(&fmr->list, &pool->dirty_list);
30190 if (++pool->dirty_len >= pool->dirty_watermark) {
30191 - atomic_inc(&pool->req_ser);
30192 + atomic_inc_unchecked(&pool->req_ser);
30193 wake_up_process(pool->thread);
30194 }
30195 }
30196 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
30197 index 40c8353..946b0e4 100644
30198 --- a/drivers/infiniband/hw/cxgb4/mem.c
30199 +++ b/drivers/infiniband/hw/cxgb4/mem.c
30200 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30201 int err;
30202 struct fw_ri_tpte tpt;
30203 u32 stag_idx;
30204 - static atomic_t key;
30205 + static atomic_unchecked_t key;
30206
30207 if (c4iw_fatal_error(rdev))
30208 return -EIO;
30209 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30210 &rdev->resource.tpt_fifo_lock);
30211 if (!stag_idx)
30212 return -ENOMEM;
30213 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
30214 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
30215 }
30216 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
30217 __func__, stag_state, type, pdid, stag_idx);
30218 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
30219 index 79b3dbc..96e5fcc 100644
30220 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
30221 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
30222 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30223 struct ib_atomic_eth *ateth;
30224 struct ipath_ack_entry *e;
30225 u64 vaddr;
30226 - atomic64_t *maddr;
30227 + atomic64_unchecked_t *maddr;
30228 u64 sdata;
30229 u32 rkey;
30230 u8 next;
30231 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30232 IB_ACCESS_REMOTE_ATOMIC)))
30233 goto nack_acc_unlck;
30234 /* Perform atomic OP and save result. */
30235 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30236 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30237 sdata = be64_to_cpu(ateth->swap_data);
30238 e = &qp->s_ack_queue[qp->r_head_ack_queue];
30239 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
30240 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30241 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30242 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30243 be64_to_cpu(ateth->compare_data),
30244 sdata);
30245 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
30246 index 1f95bba..9530f87 100644
30247 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
30248 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
30249 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
30250 unsigned long flags;
30251 struct ib_wc wc;
30252 u64 sdata;
30253 - atomic64_t *maddr;
30254 + atomic64_unchecked_t *maddr;
30255 enum ib_wc_status send_status;
30256
30257 /*
30258 @@ -382,11 +382,11 @@ again:
30259 IB_ACCESS_REMOTE_ATOMIC)))
30260 goto acc_err;
30261 /* Perform atomic OP and save result. */
30262 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30263 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30264 sdata = wqe->wr.wr.atomic.compare_add;
30265 *(u64 *) sqp->s_sge.sge.vaddr =
30266 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
30267 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30268 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30269 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30270 sdata, wqe->wr.wr.atomic.swap);
30271 goto send_comp;
30272 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
30273 index 5965b3d..16817fb 100644
30274 --- a/drivers/infiniband/hw/nes/nes.c
30275 +++ b/drivers/infiniband/hw/nes/nes.c
30276 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
30277 LIST_HEAD(nes_adapter_list);
30278 static LIST_HEAD(nes_dev_list);
30279
30280 -atomic_t qps_destroyed;
30281 +atomic_unchecked_t qps_destroyed;
30282
30283 static unsigned int ee_flsh_adapter;
30284 static unsigned int sysfs_nonidx_addr;
30285 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
30286 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30287 struct nes_adapter *nesadapter = nesdev->nesadapter;
30288
30289 - atomic_inc(&qps_destroyed);
30290 + atomic_inc_unchecked(&qps_destroyed);
30291
30292 /* Free the control structures */
30293
30294 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30295 index 568b4f1..5ea3eff 100644
30296 --- a/drivers/infiniband/hw/nes/nes.h
30297 +++ b/drivers/infiniband/hw/nes/nes.h
30298 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
30299 extern unsigned int wqm_quanta;
30300 extern struct list_head nes_adapter_list;
30301
30302 -extern atomic_t cm_connects;
30303 -extern atomic_t cm_accepts;
30304 -extern atomic_t cm_disconnects;
30305 -extern atomic_t cm_closes;
30306 -extern atomic_t cm_connecteds;
30307 -extern atomic_t cm_connect_reqs;
30308 -extern atomic_t cm_rejects;
30309 -extern atomic_t mod_qp_timouts;
30310 -extern atomic_t qps_created;
30311 -extern atomic_t qps_destroyed;
30312 -extern atomic_t sw_qps_destroyed;
30313 +extern atomic_unchecked_t cm_connects;
30314 +extern atomic_unchecked_t cm_accepts;
30315 +extern atomic_unchecked_t cm_disconnects;
30316 +extern atomic_unchecked_t cm_closes;
30317 +extern atomic_unchecked_t cm_connecteds;
30318 +extern atomic_unchecked_t cm_connect_reqs;
30319 +extern atomic_unchecked_t cm_rejects;
30320 +extern atomic_unchecked_t mod_qp_timouts;
30321 +extern atomic_unchecked_t qps_created;
30322 +extern atomic_unchecked_t qps_destroyed;
30323 +extern atomic_unchecked_t sw_qps_destroyed;
30324 extern u32 mh_detected;
30325 extern u32 mh_pauses_sent;
30326 extern u32 cm_packets_sent;
30327 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
30328 extern u32 cm_packets_received;
30329 extern u32 cm_packets_dropped;
30330 extern u32 cm_packets_retrans;
30331 -extern atomic_t cm_listens_created;
30332 -extern atomic_t cm_listens_destroyed;
30333 +extern atomic_unchecked_t cm_listens_created;
30334 +extern atomic_unchecked_t cm_listens_destroyed;
30335 extern u32 cm_backlog_drops;
30336 -extern atomic_t cm_loopbacks;
30337 -extern atomic_t cm_nodes_created;
30338 -extern atomic_t cm_nodes_destroyed;
30339 -extern atomic_t cm_accel_dropped_pkts;
30340 -extern atomic_t cm_resets_recvd;
30341 -extern atomic_t pau_qps_created;
30342 -extern atomic_t pau_qps_destroyed;
30343 +extern atomic_unchecked_t cm_loopbacks;
30344 +extern atomic_unchecked_t cm_nodes_created;
30345 +extern atomic_unchecked_t cm_nodes_destroyed;
30346 +extern atomic_unchecked_t cm_accel_dropped_pkts;
30347 +extern atomic_unchecked_t cm_resets_recvd;
30348 +extern atomic_unchecked_t pau_qps_created;
30349 +extern atomic_unchecked_t pau_qps_destroyed;
30350
30351 extern u32 int_mod_timer_init;
30352 extern u32 int_mod_cq_depth_256;
30353 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30354 index 0a52d72..0642f36 100644
30355 --- a/drivers/infiniband/hw/nes/nes_cm.c
30356 +++ b/drivers/infiniband/hw/nes/nes_cm.c
30357 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30358 u32 cm_packets_retrans;
30359 u32 cm_packets_created;
30360 u32 cm_packets_received;
30361 -atomic_t cm_listens_created;
30362 -atomic_t cm_listens_destroyed;
30363 +atomic_unchecked_t cm_listens_created;
30364 +atomic_unchecked_t cm_listens_destroyed;
30365 u32 cm_backlog_drops;
30366 -atomic_t cm_loopbacks;
30367 -atomic_t cm_nodes_created;
30368 -atomic_t cm_nodes_destroyed;
30369 -atomic_t cm_accel_dropped_pkts;
30370 -atomic_t cm_resets_recvd;
30371 +atomic_unchecked_t cm_loopbacks;
30372 +atomic_unchecked_t cm_nodes_created;
30373 +atomic_unchecked_t cm_nodes_destroyed;
30374 +atomic_unchecked_t cm_accel_dropped_pkts;
30375 +atomic_unchecked_t cm_resets_recvd;
30376
30377 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
30378 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
30379 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
30380
30381 static struct nes_cm_core *g_cm_core;
30382
30383 -atomic_t cm_connects;
30384 -atomic_t cm_accepts;
30385 -atomic_t cm_disconnects;
30386 -atomic_t cm_closes;
30387 -atomic_t cm_connecteds;
30388 -atomic_t cm_connect_reqs;
30389 -atomic_t cm_rejects;
30390 +atomic_unchecked_t cm_connects;
30391 +atomic_unchecked_t cm_accepts;
30392 +atomic_unchecked_t cm_disconnects;
30393 +atomic_unchecked_t cm_closes;
30394 +atomic_unchecked_t cm_connecteds;
30395 +atomic_unchecked_t cm_connect_reqs;
30396 +atomic_unchecked_t cm_rejects;
30397
30398 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
30399 {
30400 @@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30401 kfree(listener);
30402 listener = NULL;
30403 ret = 0;
30404 - atomic_inc(&cm_listens_destroyed);
30405 + atomic_inc_unchecked(&cm_listens_destroyed);
30406 } else {
30407 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30408 }
30409 @@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30410 cm_node->rem_mac);
30411
30412 add_hte_node(cm_core, cm_node);
30413 - atomic_inc(&cm_nodes_created);
30414 + atomic_inc_unchecked(&cm_nodes_created);
30415
30416 return cm_node;
30417 }
30418 @@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30419 }
30420
30421 atomic_dec(&cm_core->node_cnt);
30422 - atomic_inc(&cm_nodes_destroyed);
30423 + atomic_inc_unchecked(&cm_nodes_destroyed);
30424 nesqp = cm_node->nesqp;
30425 if (nesqp) {
30426 nesqp->cm_node = NULL;
30427 @@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30428
30429 static void drop_packet(struct sk_buff *skb)
30430 {
30431 - atomic_inc(&cm_accel_dropped_pkts);
30432 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30433 dev_kfree_skb_any(skb);
30434 }
30435
30436 @@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30437 {
30438
30439 int reset = 0; /* whether to send reset in case of err.. */
30440 - atomic_inc(&cm_resets_recvd);
30441 + atomic_inc_unchecked(&cm_resets_recvd);
30442 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30443 " refcnt=%d\n", cm_node, cm_node->state,
30444 atomic_read(&cm_node->ref_count));
30445 @@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30446 rem_ref_cm_node(cm_node->cm_core, cm_node);
30447 return NULL;
30448 }
30449 - atomic_inc(&cm_loopbacks);
30450 + atomic_inc_unchecked(&cm_loopbacks);
30451 loopbackremotenode->loopbackpartner = cm_node;
30452 loopbackremotenode->tcp_cntxt.rcv_wscale =
30453 NES_CM_DEFAULT_RCV_WND_SCALE;
30454 @@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30455 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
30456 else {
30457 rem_ref_cm_node(cm_core, cm_node);
30458 - atomic_inc(&cm_accel_dropped_pkts);
30459 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30460 dev_kfree_skb_any(skb);
30461 }
30462 break;
30463 @@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30464
30465 if ((cm_id) && (cm_id->event_handler)) {
30466 if (issue_disconn) {
30467 - atomic_inc(&cm_disconnects);
30468 + atomic_inc_unchecked(&cm_disconnects);
30469 cm_event.event = IW_CM_EVENT_DISCONNECT;
30470 cm_event.status = disconn_status;
30471 cm_event.local_addr = cm_id->local_addr;
30472 @@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30473 }
30474
30475 if (issue_close) {
30476 - atomic_inc(&cm_closes);
30477 + atomic_inc_unchecked(&cm_closes);
30478 nes_disconnect(nesqp, 1);
30479
30480 cm_id->provider_data = nesqp;
30481 @@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30482
30483 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30484 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30485 - atomic_inc(&cm_accepts);
30486 + atomic_inc_unchecked(&cm_accepts);
30487
30488 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30489 netdev_refcnt_read(nesvnic->netdev));
30490 @@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30491 struct nes_cm_core *cm_core;
30492 u8 *start_buff;
30493
30494 - atomic_inc(&cm_rejects);
30495 + atomic_inc_unchecked(&cm_rejects);
30496 cm_node = (struct nes_cm_node *)cm_id->provider_data;
30497 loopback = cm_node->loopbackpartner;
30498 cm_core = cm_node->cm_core;
30499 @@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30500 ntohl(cm_id->local_addr.sin_addr.s_addr),
30501 ntohs(cm_id->local_addr.sin_port));
30502
30503 - atomic_inc(&cm_connects);
30504 + atomic_inc_unchecked(&cm_connects);
30505 nesqp->active_conn = 1;
30506
30507 /* cache the cm_id in the qp */
30508 @@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30509 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30510 return err;
30511 }
30512 - atomic_inc(&cm_listens_created);
30513 + atomic_inc_unchecked(&cm_listens_created);
30514 }
30515
30516 cm_id->add_ref(cm_id);
30517 @@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30518
30519 if (nesqp->destroyed)
30520 return;
30521 - atomic_inc(&cm_connecteds);
30522 + atomic_inc_unchecked(&cm_connecteds);
30523 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30524 " local port 0x%04X. jiffies = %lu.\n",
30525 nesqp->hwqp.qp_id,
30526 @@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30527
30528 cm_id->add_ref(cm_id);
30529 ret = cm_id->event_handler(cm_id, &cm_event);
30530 - atomic_inc(&cm_closes);
30531 + atomic_inc_unchecked(&cm_closes);
30532 cm_event.event = IW_CM_EVENT_CLOSE;
30533 cm_event.status = 0;
30534 cm_event.provider_data = cm_id->provider_data;
30535 @@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30536 return;
30537 cm_id = cm_node->cm_id;
30538
30539 - atomic_inc(&cm_connect_reqs);
30540 + atomic_inc_unchecked(&cm_connect_reqs);
30541 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30542 cm_node, cm_id, jiffies);
30543
30544 @@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30545 return;
30546 cm_id = cm_node->cm_id;
30547
30548 - atomic_inc(&cm_connect_reqs);
30549 + atomic_inc_unchecked(&cm_connect_reqs);
30550 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30551 cm_node, cm_id, jiffies);
30552
30553 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
30554 index b3b2a24..7bfaf1e 100644
30555 --- a/drivers/infiniband/hw/nes/nes_mgt.c
30556 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
30557 @@ -40,8 +40,8 @@
30558 #include "nes.h"
30559 #include "nes_mgt.h"
30560
30561 -atomic_t pau_qps_created;
30562 -atomic_t pau_qps_destroyed;
30563 +atomic_unchecked_t pau_qps_created;
30564 +atomic_unchecked_t pau_qps_destroyed;
30565
30566 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
30567 {
30568 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
30569 {
30570 struct sk_buff *skb;
30571 unsigned long flags;
30572 - atomic_inc(&pau_qps_destroyed);
30573 + atomic_inc_unchecked(&pau_qps_destroyed);
30574
30575 /* Free packets that have not yet been forwarded */
30576 /* Lock is acquired by skb_dequeue when removing the skb */
30577 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
30578 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
30579 skb_queue_head_init(&nesqp->pau_list);
30580 spin_lock_init(&nesqp->pau_lock);
30581 - atomic_inc(&pau_qps_created);
30582 + atomic_inc_unchecked(&pau_qps_created);
30583 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
30584 }
30585
30586 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30587 index c00d2f3..8834298 100644
30588 --- a/drivers/infiniband/hw/nes/nes_nic.c
30589 +++ b/drivers/infiniband/hw/nes/nes_nic.c
30590 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30591 target_stat_values[++index] = mh_detected;
30592 target_stat_values[++index] = mh_pauses_sent;
30593 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30594 - target_stat_values[++index] = atomic_read(&cm_connects);
30595 - target_stat_values[++index] = atomic_read(&cm_accepts);
30596 - target_stat_values[++index] = atomic_read(&cm_disconnects);
30597 - target_stat_values[++index] = atomic_read(&cm_connecteds);
30598 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30599 - target_stat_values[++index] = atomic_read(&cm_rejects);
30600 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30601 - target_stat_values[++index] = atomic_read(&qps_created);
30602 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30603 - target_stat_values[++index] = atomic_read(&qps_destroyed);
30604 - target_stat_values[++index] = atomic_read(&cm_closes);
30605 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30606 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30607 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30608 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30609 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30610 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30611 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30612 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30613 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30614 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30615 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30616 target_stat_values[++index] = cm_packets_sent;
30617 target_stat_values[++index] = cm_packets_bounced;
30618 target_stat_values[++index] = cm_packets_created;
30619 target_stat_values[++index] = cm_packets_received;
30620 target_stat_values[++index] = cm_packets_dropped;
30621 target_stat_values[++index] = cm_packets_retrans;
30622 - target_stat_values[++index] = atomic_read(&cm_listens_created);
30623 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30624 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30625 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30626 target_stat_values[++index] = cm_backlog_drops;
30627 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
30628 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
30629 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30630 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30631 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30632 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30633 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30634 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30635 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30636 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30637 target_stat_values[++index] = nesadapter->free_4kpbl;
30638 target_stat_values[++index] = nesadapter->free_256pbl;
30639 target_stat_values[++index] = int_mod_timer_init;
30640 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
30641 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
30642 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
30643 - target_stat_values[++index] = atomic_read(&pau_qps_created);
30644 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
30645 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
30646 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
30647 }
30648
30649 /**
30650 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30651 index 5095bc4..41e8fff 100644
30652 --- a/drivers/infiniband/hw/nes/nes_verbs.c
30653 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
30654 @@ -46,9 +46,9 @@
30655
30656 #include <rdma/ib_umem.h>
30657
30658 -atomic_t mod_qp_timouts;
30659 -atomic_t qps_created;
30660 -atomic_t sw_qps_destroyed;
30661 +atomic_unchecked_t mod_qp_timouts;
30662 +atomic_unchecked_t qps_created;
30663 +atomic_unchecked_t sw_qps_destroyed;
30664
30665 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30666
30667 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30668 if (init_attr->create_flags)
30669 return ERR_PTR(-EINVAL);
30670
30671 - atomic_inc(&qps_created);
30672 + atomic_inc_unchecked(&qps_created);
30673 switch (init_attr->qp_type) {
30674 case IB_QPT_RC:
30675 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30676 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30677 struct iw_cm_event cm_event;
30678 int ret = 0;
30679
30680 - atomic_inc(&sw_qps_destroyed);
30681 + atomic_inc_unchecked(&sw_qps_destroyed);
30682 nesqp->destroyed = 1;
30683
30684 /* Blow away the connection if it exists. */
30685 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30686 index b881bdc..c2e360c 100644
30687 --- a/drivers/infiniband/hw/qib/qib.h
30688 +++ b/drivers/infiniband/hw/qib/qib.h
30689 @@ -51,6 +51,7 @@
30690 #include <linux/completion.h>
30691 #include <linux/kref.h>
30692 #include <linux/sched.h>
30693 +#include <linux/slab.h>
30694
30695 #include "qib_common.h"
30696 #include "qib_verbs.h"
30697 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30698 index c351aa4..e6967c2 100644
30699 --- a/drivers/input/gameport/gameport.c
30700 +++ b/drivers/input/gameport/gameport.c
30701 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30702 */
30703 static void gameport_init_port(struct gameport *gameport)
30704 {
30705 - static atomic_t gameport_no = ATOMIC_INIT(0);
30706 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30707
30708 __module_get(THIS_MODULE);
30709
30710 mutex_init(&gameport->drv_mutex);
30711 device_initialize(&gameport->dev);
30712 dev_set_name(&gameport->dev, "gameport%lu",
30713 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
30714 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30715 gameport->dev.bus = &gameport_bus;
30716 gameport->dev.release = gameport_release_port;
30717 if (gameport->parent)
30718 diff --git a/drivers/input/input.c b/drivers/input/input.c
30719 index da38d97..2aa0b79 100644
30720 --- a/drivers/input/input.c
30721 +++ b/drivers/input/input.c
30722 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30723 */
30724 int input_register_device(struct input_dev *dev)
30725 {
30726 - static atomic_t input_no = ATOMIC_INIT(0);
30727 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30728 struct input_handler *handler;
30729 const char *path;
30730 int error;
30731 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30732 dev->setkeycode = input_default_setkeycode;
30733
30734 dev_set_name(&dev->dev, "input%ld",
30735 - (unsigned long) atomic_inc_return(&input_no) - 1);
30736 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30737
30738 error = device_add(&dev->dev);
30739 if (error)
30740 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30741 index b8d8611..7a4a04b 100644
30742 --- a/drivers/input/joystick/sidewinder.c
30743 +++ b/drivers/input/joystick/sidewinder.c
30744 @@ -30,6 +30,7 @@
30745 #include <linux/kernel.h>
30746 #include <linux/module.h>
30747 #include <linux/slab.h>
30748 +#include <linux/sched.h>
30749 #include <linux/init.h>
30750 #include <linux/input.h>
30751 #include <linux/gameport.h>
30752 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30753 index d728875..844c89b 100644
30754 --- a/drivers/input/joystick/xpad.c
30755 +++ b/drivers/input/joystick/xpad.c
30756 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30757
30758 static int xpad_led_probe(struct usb_xpad *xpad)
30759 {
30760 - static atomic_t led_seq = ATOMIC_INIT(0);
30761 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30762 long led_no;
30763 struct xpad_led *led;
30764 struct led_classdev *led_cdev;
30765 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30766 if (!led)
30767 return -ENOMEM;
30768
30769 - led_no = (long)atomic_inc_return(&led_seq) - 1;
30770 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30771
30772 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30773 led->xpad = xpad;
30774 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30775 index 0110b5a..d3ad144 100644
30776 --- a/drivers/input/mousedev.c
30777 +++ b/drivers/input/mousedev.c
30778 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30779
30780 spin_unlock_irq(&client->packet_lock);
30781
30782 - if (copy_to_user(buffer, data, count))
30783 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
30784 return -EFAULT;
30785
30786 return count;
30787 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30788 index ba70058..571d25d 100644
30789 --- a/drivers/input/serio/serio.c
30790 +++ b/drivers/input/serio/serio.c
30791 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30792 */
30793 static void serio_init_port(struct serio *serio)
30794 {
30795 - static atomic_t serio_no = ATOMIC_INIT(0);
30796 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30797
30798 __module_get(THIS_MODULE);
30799
30800 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30801 mutex_init(&serio->drv_mutex);
30802 device_initialize(&serio->dev);
30803 dev_set_name(&serio->dev, "serio%ld",
30804 - (long)atomic_inc_return(&serio_no) - 1);
30805 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
30806 serio->dev.bus = &serio_bus;
30807 serio->dev.release = serio_release_port;
30808 serio->dev.groups = serio_device_attr_groups;
30809 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30810 index e44933d..9ba484a 100644
30811 --- a/drivers/isdn/capi/capi.c
30812 +++ b/drivers/isdn/capi/capi.c
30813 @@ -83,8 +83,8 @@ struct capiminor {
30814
30815 struct capi20_appl *ap;
30816 u32 ncci;
30817 - atomic_t datahandle;
30818 - atomic_t msgid;
30819 + atomic_unchecked_t datahandle;
30820 + atomic_unchecked_t msgid;
30821
30822 struct tty_port port;
30823 int ttyinstop;
30824 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30825 capimsg_setu16(s, 2, mp->ap->applid);
30826 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30827 capimsg_setu8 (s, 5, CAPI_RESP);
30828 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30829 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30830 capimsg_setu32(s, 8, mp->ncci);
30831 capimsg_setu16(s, 12, datahandle);
30832 }
30833 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30834 mp->outbytes -= len;
30835 spin_unlock_bh(&mp->outlock);
30836
30837 - datahandle = atomic_inc_return(&mp->datahandle);
30838 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30839 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30840 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30841 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30842 capimsg_setu16(skb->data, 2, mp->ap->applid);
30843 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30844 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30845 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30846 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30847 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30848 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30849 capimsg_setu16(skb->data, 16, len); /* Data length */
30850 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30851 index db621db..825ea1a 100644
30852 --- a/drivers/isdn/gigaset/common.c
30853 +++ b/drivers/isdn/gigaset/common.c
30854 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30855 cs->commands_pending = 0;
30856 cs->cur_at_seq = 0;
30857 cs->gotfwver = -1;
30858 - cs->open_count = 0;
30859 + local_set(&cs->open_count, 0);
30860 cs->dev = NULL;
30861 cs->tty = NULL;
30862 cs->tty_dev = NULL;
30863 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30864 index 212efaf..f187c6b 100644
30865 --- a/drivers/isdn/gigaset/gigaset.h
30866 +++ b/drivers/isdn/gigaset/gigaset.h
30867 @@ -35,6 +35,7 @@
30868 #include <linux/tty_driver.h>
30869 #include <linux/list.h>
30870 #include <linux/atomic.h>
30871 +#include <asm/local.h>
30872
30873 #define GIG_VERSION {0, 5, 0, 0}
30874 #define GIG_COMPAT {0, 4, 0, 0}
30875 @@ -433,7 +434,7 @@ struct cardstate {
30876 spinlock_t cmdlock;
30877 unsigned curlen, cmdbytes;
30878
30879 - unsigned open_count;
30880 + local_t open_count;
30881 struct tty_struct *tty;
30882 struct tasklet_struct if_wake_tasklet;
30883 unsigned control_state;
30884 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30885 index ee0a549..a7c9798 100644
30886 --- a/drivers/isdn/gigaset/interface.c
30887 +++ b/drivers/isdn/gigaset/interface.c
30888 @@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30889 }
30890 tty->driver_data = cs;
30891
30892 - ++cs->open_count;
30893 -
30894 - if (cs->open_count == 1) {
30895 + if (local_inc_return(&cs->open_count) == 1) {
30896 spin_lock_irqsave(&cs->lock, flags);
30897 cs->tty = tty;
30898 spin_unlock_irqrestore(&cs->lock, flags);
30899 @@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30900
30901 if (!cs->connected)
30902 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30903 - else if (!cs->open_count)
30904 + else if (!local_read(&cs->open_count))
30905 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30906 else {
30907 - if (!--cs->open_count) {
30908 + if (!local_dec_return(&cs->open_count)) {
30909 spin_lock_irqsave(&cs->lock, flags);
30910 cs->tty = NULL;
30911 spin_unlock_irqrestore(&cs->lock, flags);
30912 @@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
30913 if (!cs->connected) {
30914 gig_dbg(DEBUG_IF, "not connected");
30915 retval = -ENODEV;
30916 - } else if (!cs->open_count)
30917 + } else if (!local_read(&cs->open_count))
30918 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30919 else {
30920 retval = 0;
30921 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30922 retval = -ENODEV;
30923 goto done;
30924 }
30925 - if (!cs->open_count) {
30926 + if (!local_read(&cs->open_count)) {
30927 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30928 retval = -ENODEV;
30929 goto done;
30930 @@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
30931 if (!cs->connected) {
30932 gig_dbg(DEBUG_IF, "not connected");
30933 retval = -ENODEV;
30934 - } else if (!cs->open_count)
30935 + } else if (!local_read(&cs->open_count))
30936 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30937 else if (cs->mstate != MS_LOCKED) {
30938 dev_warn(cs->dev, "can't write to unlocked device\n");
30939 @@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30940
30941 if (!cs->connected)
30942 gig_dbg(DEBUG_IF, "not connected");
30943 - else if (!cs->open_count)
30944 + else if (!local_read(&cs->open_count))
30945 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30946 else if (cs->mstate != MS_LOCKED)
30947 dev_warn(cs->dev, "can't write to unlocked device\n");
30948 @@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
30949
30950 if (!cs->connected)
30951 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30952 - else if (!cs->open_count)
30953 + else if (!local_read(&cs->open_count))
30954 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30955 else
30956 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30957 @@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
30958
30959 if (!cs->connected)
30960 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30961 - else if (!cs->open_count)
30962 + else if (!local_read(&cs->open_count))
30963 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30964 else
30965 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30966 @@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30967 goto out;
30968 }
30969
30970 - if (!cs->open_count) {
30971 + if (!local_read(&cs->open_count)) {
30972 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30973 goto out;
30974 }
30975 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30976 index 2a57da59..e7a12ed 100644
30977 --- a/drivers/isdn/hardware/avm/b1.c
30978 +++ b/drivers/isdn/hardware/avm/b1.c
30979 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30980 }
30981 if (left) {
30982 if (t4file->user) {
30983 - if (copy_from_user(buf, dp, left))
30984 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30985 return -EFAULT;
30986 } else {
30987 memcpy(buf, dp, left);
30988 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
30989 }
30990 if (left) {
30991 if (config->user) {
30992 - if (copy_from_user(buf, dp, left))
30993 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30994 return -EFAULT;
30995 } else {
30996 memcpy(buf, dp, left);
30997 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
30998 index 85784a7..a19ca98 100644
30999 --- a/drivers/isdn/hardware/eicon/divasync.h
31000 +++ b/drivers/isdn/hardware/eicon/divasync.h
31001 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31002 } diva_didd_add_adapter_t;
31003 typedef struct _diva_didd_remove_adapter {
31004 IDI_CALL p_request;
31005 -} diva_didd_remove_adapter_t;
31006 +} __no_const diva_didd_remove_adapter_t;
31007 typedef struct _diva_didd_read_adapter_array {
31008 void * buffer;
31009 dword length;
31010 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
31011 index a3bd163..8956575 100644
31012 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
31013 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
31014 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31015 typedef struct _diva_os_idi_adapter_interface {
31016 diva_init_card_proc_t cleanup_adapter_proc;
31017 diva_cmd_card_proc_t cmd_proc;
31018 -} diva_os_idi_adapter_interface_t;
31019 +} __no_const diva_os_idi_adapter_interface_t;
31020
31021 typedef struct _diva_os_xdi_adapter {
31022 struct list_head link;
31023 diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
31024 index 2339d73..802ab87 100644
31025 --- a/drivers/isdn/i4l/isdn_net.c
31026 +++ b/drivers/isdn/i4l/isdn_net.c
31027 @@ -1901,7 +1901,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
31028 {
31029 isdn_net_local *lp = netdev_priv(dev);
31030 unsigned char *p;
31031 - ushort len = 0;
31032 + int len = 0;
31033
31034 switch (lp->p_encap) {
31035 case ISDN_NET_ENCAP_ETHER:
31036 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
31037 index 1f355bb..43f1fea 100644
31038 --- a/drivers/isdn/icn/icn.c
31039 +++ b/drivers/isdn/icn/icn.c
31040 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
31041 if (count > len)
31042 count = len;
31043 if (user) {
31044 - if (copy_from_user(msg, buf, count))
31045 + if (count > sizeof msg || copy_from_user(msg, buf, count))
31046 return -EFAULT;
31047 } else
31048 memcpy(msg, buf, count);
31049 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
31050 index b5fdcb7..5b6c59f 100644
31051 --- a/drivers/lguest/core.c
31052 +++ b/drivers/lguest/core.c
31053 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
31054 * it's worked so far. The end address needs +1 because __get_vm_area
31055 * allocates an extra guard page, so we need space for that.
31056 */
31057 +
31058 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31059 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31060 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31061 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31062 +#else
31063 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31064 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31065 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31066 +#endif
31067 +
31068 if (!switcher_vma) {
31069 err = -ENOMEM;
31070 printk("lguest: could not map switcher pages high\n");
31071 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
31072 * Now the Switcher is mapped at the right address, we can't fail!
31073 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
31074 */
31075 - memcpy(switcher_vma->addr, start_switcher_text,
31076 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31077 end_switcher_text - start_switcher_text);
31078
31079 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31080 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
31081 index 65af42f..530c87a 100644
31082 --- a/drivers/lguest/x86/core.c
31083 +++ b/drivers/lguest/x86/core.c
31084 @@ -59,7 +59,7 @@ static struct {
31085 /* Offset from where switcher.S was compiled to where we've copied it */
31086 static unsigned long switcher_offset(void)
31087 {
31088 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31089 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31090 }
31091
31092 /* This cpu's struct lguest_pages. */
31093 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
31094 * These copies are pretty cheap, so we do them unconditionally: */
31095 /* Save the current Host top-level page directory.
31096 */
31097 +
31098 +#ifdef CONFIG_PAX_PER_CPU_PGD
31099 + pages->state.host_cr3 = read_cr3();
31100 +#else
31101 pages->state.host_cr3 = __pa(current->mm->pgd);
31102 +#endif
31103 +
31104 /*
31105 * Set up the Guest's page tables to see this CPU's pages (and no
31106 * other CPU's pages).
31107 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
31108 * compiled-in switcher code and the high-mapped copy we just made.
31109 */
31110 for (i = 0; i < IDT_ENTRIES; i++)
31111 - default_idt_entries[i] += switcher_offset();
31112 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31113
31114 /*
31115 * Set up the Switcher's per-cpu areas.
31116 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
31117 * it will be undisturbed when we switch. To change %cs and jump we
31118 * need this structure to feed to Intel's "lcall" instruction.
31119 */
31120 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31121 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31122 lguest_entry.segment = LGUEST_CS;
31123
31124 /*
31125 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
31126 index 40634b0..4f5855e 100644
31127 --- a/drivers/lguest/x86/switcher_32.S
31128 +++ b/drivers/lguest/x86/switcher_32.S
31129 @@ -87,6 +87,7 @@
31130 #include <asm/page.h>
31131 #include <asm/segment.h>
31132 #include <asm/lguest.h>
31133 +#include <asm/processor-flags.h>
31134
31135 // We mark the start of the code to copy
31136 // It's placed in .text tho it's never run here
31137 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31138 // Changes type when we load it: damn Intel!
31139 // For after we switch over our page tables
31140 // That entry will be read-only: we'd crash.
31141 +
31142 +#ifdef CONFIG_PAX_KERNEXEC
31143 + mov %cr0, %edx
31144 + xor $X86_CR0_WP, %edx
31145 + mov %edx, %cr0
31146 +#endif
31147 +
31148 movl $(GDT_ENTRY_TSS*8), %edx
31149 ltr %dx
31150
31151 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31152 // Let's clear it again for our return.
31153 // The GDT descriptor of the Host
31154 // Points to the table after two "size" bytes
31155 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31156 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31157 // Clear "used" from type field (byte 5, bit 2)
31158 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31159 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31160 +
31161 +#ifdef CONFIG_PAX_KERNEXEC
31162 + mov %cr0, %eax
31163 + xor $X86_CR0_WP, %eax
31164 + mov %eax, %cr0
31165 +#endif
31166
31167 // Once our page table's switched, the Guest is live!
31168 // The Host fades as we run this final step.
31169 @@ -295,13 +309,12 @@ deliver_to_host:
31170 // I consulted gcc, and it gave
31171 // These instructions, which I gladly credit:
31172 leal (%edx,%ebx,8), %eax
31173 - movzwl (%eax),%edx
31174 - movl 4(%eax), %eax
31175 - xorw %ax, %ax
31176 - orl %eax, %edx
31177 + movl 4(%eax), %edx
31178 + movw (%eax), %dx
31179 // Now the address of the handler's in %edx
31180 // We call it now: its "iret" drops us home.
31181 - jmp *%edx
31182 + ljmp $__KERNEL_CS, $1f
31183 +1: jmp *%edx
31184
31185 // Every interrupt can come to us here
31186 // But we must truly tell each apart.
31187 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
31188 index 4daf9e5..b8d1d0f 100644
31189 --- a/drivers/macintosh/macio_asic.c
31190 +++ b/drivers/macintosh/macio_asic.c
31191 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
31192 * MacIO is matched against any Apple ID, it's probe() function
31193 * will then decide wether it applies or not
31194 */
31195 -static const struct pci_device_id __devinitdata pci_ids [] = { {
31196 +static const struct pci_device_id __devinitconst pci_ids [] = { {
31197 .vendor = PCI_VENDOR_ID_APPLE,
31198 .device = PCI_ANY_ID,
31199 .subvendor = PCI_ANY_ID,
31200 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
31201 index 31c2dc2..a2de7a6 100644
31202 --- a/drivers/md/dm-ioctl.c
31203 +++ b/drivers/md/dm-ioctl.c
31204 @@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
31205 cmd == DM_LIST_VERSIONS_CMD)
31206 return 0;
31207
31208 - if ((cmd == DM_DEV_CREATE_CMD)) {
31209 + if (cmd == DM_DEV_CREATE_CMD) {
31210 if (!*param->name) {
31211 DMWARN("name not supplied when creating device");
31212 return -EINVAL;
31213 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
31214 index 9bfd057..01180bc 100644
31215 --- a/drivers/md/dm-raid1.c
31216 +++ b/drivers/md/dm-raid1.c
31217 @@ -40,7 +40,7 @@ enum dm_raid1_error {
31218
31219 struct mirror {
31220 struct mirror_set *ms;
31221 - atomic_t error_count;
31222 + atomic_unchecked_t error_count;
31223 unsigned long error_type;
31224 struct dm_dev *dev;
31225 sector_t offset;
31226 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
31227 struct mirror *m;
31228
31229 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
31230 - if (!atomic_read(&m->error_count))
31231 + if (!atomic_read_unchecked(&m->error_count))
31232 return m;
31233
31234 return NULL;
31235 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
31236 * simple way to tell if a device has encountered
31237 * errors.
31238 */
31239 - atomic_inc(&m->error_count);
31240 + atomic_inc_unchecked(&m->error_count);
31241
31242 if (test_and_set_bit(error_type, &m->error_type))
31243 return;
31244 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
31245 struct mirror *m = get_default_mirror(ms);
31246
31247 do {
31248 - if (likely(!atomic_read(&m->error_count)))
31249 + if (likely(!atomic_read_unchecked(&m->error_count)))
31250 return m;
31251
31252 if (m-- == ms->mirror)
31253 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
31254 {
31255 struct mirror *default_mirror = get_default_mirror(m->ms);
31256
31257 - return !atomic_read(&default_mirror->error_count);
31258 + return !atomic_read_unchecked(&default_mirror->error_count);
31259 }
31260
31261 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31262 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
31263 */
31264 if (likely(region_in_sync(ms, region, 1)))
31265 m = choose_mirror(ms, bio->bi_sector);
31266 - else if (m && atomic_read(&m->error_count))
31267 + else if (m && atomic_read_unchecked(&m->error_count))
31268 m = NULL;
31269
31270 if (likely(m))
31271 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
31272 }
31273
31274 ms->mirror[mirror].ms = ms;
31275 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31276 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31277 ms->mirror[mirror].error_type = 0;
31278 ms->mirror[mirror].offset = offset;
31279
31280 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
31281 */
31282 static char device_status_char(struct mirror *m)
31283 {
31284 - if (!atomic_read(&(m->error_count)))
31285 + if (!atomic_read_unchecked(&(m->error_count)))
31286 return 'A';
31287
31288 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
31289 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
31290 index 3d80cf0..b77cc47 100644
31291 --- a/drivers/md/dm-stripe.c
31292 +++ b/drivers/md/dm-stripe.c
31293 @@ -20,7 +20,7 @@ struct stripe {
31294 struct dm_dev *dev;
31295 sector_t physical_start;
31296
31297 - atomic_t error_count;
31298 + atomic_unchecked_t error_count;
31299 };
31300
31301 struct stripe_c {
31302 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31303 kfree(sc);
31304 return r;
31305 }
31306 - atomic_set(&(sc->stripe[i].error_count), 0);
31307 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31308 }
31309
31310 ti->private = sc;
31311 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31312 DMEMIT("%d ", sc->stripes);
31313 for (i = 0; i < sc->stripes; i++) {
31314 DMEMIT("%s ", sc->stripe[i].dev->name);
31315 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31316 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31317 'D' : 'A';
31318 }
31319 buffer[i] = '\0';
31320 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31321 */
31322 for (i = 0; i < sc->stripes; i++)
31323 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31324 - atomic_inc(&(sc->stripe[i].error_count));
31325 - if (atomic_read(&(sc->stripe[i].error_count)) <
31326 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31327 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31328 DM_IO_ERROR_THRESHOLD)
31329 schedule_work(&sc->trigger_event);
31330 }
31331 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31332 index 8e91321..fd17aef 100644
31333 --- a/drivers/md/dm-table.c
31334 +++ b/drivers/md/dm-table.c
31335 @@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31336 if (!dev_size)
31337 return 0;
31338
31339 - if ((start >= dev_size) || (start + len > dev_size)) {
31340 + if ((start >= dev_size) || (len > dev_size - start)) {
31341 DMWARN("%s: %s too small for target: "
31342 "start=%llu, len=%llu, dev_size=%llu",
31343 dm_device_name(ti->table->md), bdevname(bdev, b),
31344 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
31345 index 59c4f04..4c7b661 100644
31346 --- a/drivers/md/dm-thin-metadata.c
31347 +++ b/drivers/md/dm-thin-metadata.c
31348 @@ -431,7 +431,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31349
31350 pmd->info.tm = tm;
31351 pmd->info.levels = 2;
31352 - pmd->info.value_type.context = pmd->data_sm;
31353 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31354 pmd->info.value_type.size = sizeof(__le64);
31355 pmd->info.value_type.inc = data_block_inc;
31356 pmd->info.value_type.dec = data_block_dec;
31357 @@ -450,7 +450,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31358
31359 pmd->bl_info.tm = tm;
31360 pmd->bl_info.levels = 1;
31361 - pmd->bl_info.value_type.context = pmd->data_sm;
31362 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31363 pmd->bl_info.value_type.size = sizeof(__le64);
31364 pmd->bl_info.value_type.inc = data_block_inc;
31365 pmd->bl_info.value_type.dec = data_block_dec;
31366 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31367 index 4720f68..78d1df7 100644
31368 --- a/drivers/md/dm.c
31369 +++ b/drivers/md/dm.c
31370 @@ -177,9 +177,9 @@ struct mapped_device {
31371 /*
31372 * Event handling.
31373 */
31374 - atomic_t event_nr;
31375 + atomic_unchecked_t event_nr;
31376 wait_queue_head_t eventq;
31377 - atomic_t uevent_seq;
31378 + atomic_unchecked_t uevent_seq;
31379 struct list_head uevent_list;
31380 spinlock_t uevent_lock; /* Protect access to uevent_list */
31381
31382 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
31383 rwlock_init(&md->map_lock);
31384 atomic_set(&md->holders, 1);
31385 atomic_set(&md->open_count, 0);
31386 - atomic_set(&md->event_nr, 0);
31387 - atomic_set(&md->uevent_seq, 0);
31388 + atomic_set_unchecked(&md->event_nr, 0);
31389 + atomic_set_unchecked(&md->uevent_seq, 0);
31390 INIT_LIST_HEAD(&md->uevent_list);
31391 spin_lock_init(&md->uevent_lock);
31392
31393 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
31394
31395 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31396
31397 - atomic_inc(&md->event_nr);
31398 + atomic_inc_unchecked(&md->event_nr);
31399 wake_up(&md->eventq);
31400 }
31401
31402 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31403
31404 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31405 {
31406 - return atomic_add_return(1, &md->uevent_seq);
31407 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31408 }
31409
31410 uint32_t dm_get_event_nr(struct mapped_device *md)
31411 {
31412 - return atomic_read(&md->event_nr);
31413 + return atomic_read_unchecked(&md->event_nr);
31414 }
31415
31416 int dm_wait_event(struct mapped_device *md, int event_nr)
31417 {
31418 return wait_event_interruptible(md->eventq,
31419 - (event_nr != atomic_read(&md->event_nr)));
31420 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31421 }
31422
31423 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31424 diff --git a/drivers/md/md.c b/drivers/md/md.c
31425 index f47f1f8..b7f559e 100644
31426 --- a/drivers/md/md.c
31427 +++ b/drivers/md/md.c
31428 @@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31429 * start build, activate spare
31430 */
31431 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31432 -static atomic_t md_event_count;
31433 +static atomic_unchecked_t md_event_count;
31434 void md_new_event(struct mddev *mddev)
31435 {
31436 - atomic_inc(&md_event_count);
31437 + atomic_inc_unchecked(&md_event_count);
31438 wake_up(&md_event_waiters);
31439 }
31440 EXPORT_SYMBOL_GPL(md_new_event);
31441 @@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31442 */
31443 static void md_new_event_inintr(struct mddev *mddev)
31444 {
31445 - atomic_inc(&md_event_count);
31446 + atomic_inc_unchecked(&md_event_count);
31447 wake_up(&md_event_waiters);
31448 }
31449
31450 @@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
31451
31452 rdev->preferred_minor = 0xffff;
31453 rdev->data_offset = le64_to_cpu(sb->data_offset);
31454 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31455 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31456
31457 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31458 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31459 @@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
31460 else
31461 sb->resync_offset = cpu_to_le64(0);
31462
31463 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31464 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31465
31466 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31467 sb->size = cpu_to_le64(mddev->dev_sectors);
31468 @@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31469 static ssize_t
31470 errors_show(struct md_rdev *rdev, char *page)
31471 {
31472 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31473 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31474 }
31475
31476 static ssize_t
31477 @@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
31478 char *e;
31479 unsigned long n = simple_strtoul(buf, &e, 10);
31480 if (*buf && (*e == 0 || *e == '\n')) {
31481 - atomic_set(&rdev->corrected_errors, n);
31482 + atomic_set_unchecked(&rdev->corrected_errors, n);
31483 return len;
31484 }
31485 return -EINVAL;
31486 @@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev)
31487 rdev->sb_loaded = 0;
31488 rdev->bb_page = NULL;
31489 atomic_set(&rdev->nr_pending, 0);
31490 - atomic_set(&rdev->read_errors, 0);
31491 - atomic_set(&rdev->corrected_errors, 0);
31492 + atomic_set_unchecked(&rdev->read_errors, 0);
31493 + atomic_set_unchecked(&rdev->corrected_errors, 0);
31494
31495 INIT_LIST_HEAD(&rdev->same_set);
31496 init_waitqueue_head(&rdev->blocked_wait);
31497 @@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31498
31499 spin_unlock(&pers_lock);
31500 seq_printf(seq, "\n");
31501 - seq->poll_event = atomic_read(&md_event_count);
31502 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31503 return 0;
31504 }
31505 if (v == (void*)2) {
31506 @@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31507 chunk_kb ? "KB" : "B");
31508 if (bitmap->file) {
31509 seq_printf(seq, ", file: ");
31510 - seq_path(seq, &bitmap->file->f_path, " \t\n");
31511 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31512 }
31513
31514 seq_printf(seq, "\n");
31515 @@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31516 return error;
31517
31518 seq = file->private_data;
31519 - seq->poll_event = atomic_read(&md_event_count);
31520 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31521 return error;
31522 }
31523
31524 @@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31525 /* always allow read */
31526 mask = POLLIN | POLLRDNORM;
31527
31528 - if (seq->poll_event != atomic_read(&md_event_count))
31529 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31530 mask |= POLLERR | POLLPRI;
31531 return mask;
31532 }
31533 @@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
31534 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31535 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31536 (int)part_stat_read(&disk->part0, sectors[1]) -
31537 - atomic_read(&disk->sync_io);
31538 + atomic_read_unchecked(&disk->sync_io);
31539 /* sync IO will cause sync_io to increase before the disk_stats
31540 * as sync_io is counted when a request starts, and
31541 * disk_stats is counted when it completes.
31542 diff --git a/drivers/md/md.h b/drivers/md/md.h
31543 index cf742d9..7c7c745 100644
31544 --- a/drivers/md/md.h
31545 +++ b/drivers/md/md.h
31546 @@ -120,13 +120,13 @@ struct md_rdev {
31547 * only maintained for arrays that
31548 * support hot removal
31549 */
31550 - atomic_t read_errors; /* number of consecutive read errors that
31551 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
31552 * we have tried to ignore.
31553 */
31554 struct timespec last_read_error; /* monotonic time since our
31555 * last read error
31556 */
31557 - atomic_t corrected_errors; /* number of corrected read errors,
31558 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31559 * for reporting to userspace and storing
31560 * in superblock.
31561 */
31562 @@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
31563
31564 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31565 {
31566 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31567 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31568 }
31569
31570 struct md_personality
31571 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
31572 index 50ed53b..4f29d7d 100644
31573 --- a/drivers/md/persistent-data/dm-space-map-checker.c
31574 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
31575 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
31576 /*----------------------------------------------------------------*/
31577
31578 struct sm_checker {
31579 - struct dm_space_map sm;
31580 + dm_space_map_no_const sm;
31581
31582 struct count_array old_counts;
31583 struct count_array counts;
31584 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
31585 index fc469ba..2d91555 100644
31586 --- a/drivers/md/persistent-data/dm-space-map-disk.c
31587 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
31588 @@ -23,7 +23,7 @@
31589 * Space map interface.
31590 */
31591 struct sm_disk {
31592 - struct dm_space_map sm;
31593 + dm_space_map_no_const sm;
31594
31595 struct ll_disk ll;
31596 struct ll_disk old_ll;
31597 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
31598 index e89ae5e..062e4c2 100644
31599 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
31600 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
31601 @@ -43,7 +43,7 @@ struct block_op {
31602 };
31603
31604 struct sm_metadata {
31605 - struct dm_space_map sm;
31606 + dm_space_map_no_const sm;
31607
31608 struct ll_disk ll;
31609 struct ll_disk old_ll;
31610 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
31611 index 1cbfc6b..56e1dbb 100644
31612 --- a/drivers/md/persistent-data/dm-space-map.h
31613 +++ b/drivers/md/persistent-data/dm-space-map.h
31614 @@ -60,6 +60,7 @@ struct dm_space_map {
31615 int (*root_size)(struct dm_space_map *sm, size_t *result);
31616 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
31617 };
31618 +typedef struct dm_space_map __no_const dm_space_map_no_const;
31619
31620 /*----------------------------------------------------------------*/
31621
31622 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31623 index 7d9e071..015b1d5 100644
31624 --- a/drivers/md/raid1.c
31625 +++ b/drivers/md/raid1.c
31626 @@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
31627 if (r1_sync_page_io(rdev, sect, s,
31628 bio->bi_io_vec[idx].bv_page,
31629 READ) != 0)
31630 - atomic_add(s, &rdev->corrected_errors);
31631 + atomic_add_unchecked(s, &rdev->corrected_errors);
31632 }
31633 sectors -= s;
31634 sect += s;
31635 @@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
31636 test_bit(In_sync, &rdev->flags)) {
31637 if (r1_sync_page_io(rdev, sect, s,
31638 conf->tmppage, READ)) {
31639 - atomic_add(s, &rdev->corrected_errors);
31640 + atomic_add_unchecked(s, &rdev->corrected_errors);
31641 printk(KERN_INFO
31642 "md/raid1:%s: read error corrected "
31643 "(%d sectors at %llu on %s)\n",
31644 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31645 index 685ddf3..955b087 100644
31646 --- a/drivers/md/raid10.c
31647 +++ b/drivers/md/raid10.c
31648 @@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error)
31649 /* The write handler will notice the lack of
31650 * R10BIO_Uptodate and record any errors etc
31651 */
31652 - atomic_add(r10_bio->sectors,
31653 + atomic_add_unchecked(r10_bio->sectors,
31654 &conf->mirrors[d].rdev->corrected_errors);
31655
31656 /* for reconstruct, we always reschedule after a read.
31657 @@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31658 {
31659 struct timespec cur_time_mon;
31660 unsigned long hours_since_last;
31661 - unsigned int read_errors = atomic_read(&rdev->read_errors);
31662 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31663
31664 ktime_get_ts(&cur_time_mon);
31665
31666 @@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31667 * overflowing the shift of read_errors by hours_since_last.
31668 */
31669 if (hours_since_last >= 8 * sizeof(read_errors))
31670 - atomic_set(&rdev->read_errors, 0);
31671 + atomic_set_unchecked(&rdev->read_errors, 0);
31672 else
31673 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31674 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31675 }
31676
31677 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
31678 @@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31679 return;
31680
31681 check_decay_read_errors(mddev, rdev);
31682 - atomic_inc(&rdev->read_errors);
31683 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
31684 + atomic_inc_unchecked(&rdev->read_errors);
31685 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31686 char b[BDEVNAME_SIZE];
31687 bdevname(rdev->bdev, b);
31688
31689 @@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31690 "md/raid10:%s: %s: Raid device exceeded "
31691 "read_error threshold [cur %d:max %d]\n",
31692 mdname(mddev), b,
31693 - atomic_read(&rdev->read_errors), max_read_errors);
31694 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31695 printk(KERN_NOTICE
31696 "md/raid10:%s: %s: Failing raid device\n",
31697 mdname(mddev), b);
31698 @@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31699 (unsigned long long)(
31700 sect + rdev->data_offset),
31701 bdevname(rdev->bdev, b));
31702 - atomic_add(s, &rdev->corrected_errors);
31703 + atomic_add_unchecked(s, &rdev->corrected_errors);
31704 }
31705
31706 rdev_dec_pending(rdev, mddev);
31707 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31708 index 858fdbb..b2dac95 100644
31709 --- a/drivers/md/raid5.c
31710 +++ b/drivers/md/raid5.c
31711 @@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31712 (unsigned long long)(sh->sector
31713 + rdev->data_offset),
31714 bdevname(rdev->bdev, b));
31715 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31716 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31717 clear_bit(R5_ReadError, &sh->dev[i].flags);
31718 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31719 }
31720 - if (atomic_read(&conf->disks[i].rdev->read_errors))
31721 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
31722 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31723 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31724 } else {
31725 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31726 int retry = 0;
31727 rdev = conf->disks[i].rdev;
31728
31729 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31730 - atomic_inc(&rdev->read_errors);
31731 + atomic_inc_unchecked(&rdev->read_errors);
31732 if (conf->mddev->degraded >= conf->max_degraded)
31733 printk_ratelimited(
31734 KERN_WARNING
31735 @@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31736 (unsigned long long)(sh->sector
31737 + rdev->data_offset),
31738 bdn);
31739 - else if (atomic_read(&rdev->read_errors)
31740 + else if (atomic_read_unchecked(&rdev->read_errors)
31741 > conf->max_nr_stripes)
31742 printk(KERN_WARNING
31743 "md/raid:%s: Too many read errors, failing device %s.\n",
31744 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31745 index ba9a643..e474ab5 100644
31746 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31747 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31748 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
31749 .subvendor = _subvend, .subdevice = _subdev, \
31750 .driver_data = (unsigned long)&_driverdata }
31751
31752 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31753 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31754 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31755 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31756 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31757 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31758 index a7d876f..8c21b61 100644
31759 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
31760 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31761 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
31762 union {
31763 dmx_ts_cb ts;
31764 dmx_section_cb sec;
31765 - } cb;
31766 + } __no_const cb;
31767
31768 struct dvb_demux *demux;
31769 void *priv;
31770 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31771 index f732877..d38c35a 100644
31772 --- a/drivers/media/dvb/dvb-core/dvbdev.c
31773 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
31774 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31775 const struct dvb_device *template, void *priv, int type)
31776 {
31777 struct dvb_device *dvbdev;
31778 - struct file_operations *dvbdevfops;
31779 + file_operations_no_const *dvbdevfops;
31780 struct device *clsdev;
31781 int minor;
31782 int id;
31783 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31784 index 9f2a02c..5920f88 100644
31785 --- a/drivers/media/dvb/dvb-usb/cxusb.c
31786 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
31787 @@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31788 struct dib0700_adapter_state {
31789 int (*set_param_save) (struct dvb_frontend *,
31790 struct dvb_frontend_parameters *);
31791 -};
31792 +} __no_const;
31793
31794 static int dib7070_set_param_override(struct dvb_frontend *fe,
31795 struct dvb_frontend_parameters *fep)
31796 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31797 index f103ec1..5e8968b 100644
31798 --- a/drivers/media/dvb/dvb-usb/dw2102.c
31799 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
31800 @@ -95,7 +95,7 @@ struct su3000_state {
31801
31802 struct s6x0_state {
31803 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31804 -};
31805 +} __no_const;
31806
31807 /* debug */
31808 static int dvb_usb_dw2102_debug;
31809 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31810 index 404f63a..4796533 100644
31811 --- a/drivers/media/dvb/frontends/dib3000.h
31812 +++ b/drivers/media/dvb/frontends/dib3000.h
31813 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31814 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31815 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31816 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31817 -};
31818 +} __no_const;
31819
31820 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31821 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31822 diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
31823 index 90bf573..e8463da 100644
31824 --- a/drivers/media/dvb/frontends/ds3000.c
31825 +++ b/drivers/media/dvb/frontends/ds3000.c
31826 @@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
31827
31828 for (i = 0; i < 30 ; i++) {
31829 ds3000_read_status(fe, &status);
31830 - if (status && FE_HAS_LOCK)
31831 + if (status & FE_HAS_LOCK)
31832 break;
31833
31834 msleep(10);
31835 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31836 index 0564192..75b16f5 100644
31837 --- a/drivers/media/dvb/ngene/ngene-cards.c
31838 +++ b/drivers/media/dvb/ngene/ngene-cards.c
31839 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31840
31841 /****************************************************************************/
31842
31843 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31844 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31845 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31846 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31847 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31848 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31849 index 16a089f..ab1667d 100644
31850 --- a/drivers/media/radio/radio-cadet.c
31851 +++ b/drivers/media/radio/radio-cadet.c
31852 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31853 unsigned char readbuf[RDS_BUFFER];
31854 int i = 0;
31855
31856 + if (count > RDS_BUFFER)
31857 + return -EFAULT;
31858 mutex_lock(&dev->lock);
31859 if (dev->rdsstat == 0) {
31860 dev->rdsstat = 1;
31861 diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
31862 index 61287fc..8b08712 100644
31863 --- a/drivers/media/rc/redrat3.c
31864 +++ b/drivers/media/rc/redrat3.c
31865 @@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
31866 return carrier;
31867 }
31868
31869 -static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
31870 +static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
31871 {
31872 struct redrat3_dev *rr3 = rcdev->priv;
31873 struct device *dev = rr3->dev;
31874 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31875 index 9cde353..8c6a1c3 100644
31876 --- a/drivers/media/video/au0828/au0828.h
31877 +++ b/drivers/media/video/au0828/au0828.h
31878 @@ -191,7 +191,7 @@ struct au0828_dev {
31879
31880 /* I2C */
31881 struct i2c_adapter i2c_adap;
31882 - struct i2c_algorithm i2c_algo;
31883 + i2c_algorithm_no_const i2c_algo;
31884 struct i2c_client i2c_client;
31885 u32 i2c_rc;
31886
31887 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31888 index 68d1240..46b32eb 100644
31889 --- a/drivers/media/video/cx88/cx88-alsa.c
31890 +++ b/drivers/media/video/cx88/cx88-alsa.c
31891 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31892 * Only boards with eeprom and byte 1 at eeprom=1 have it
31893 */
31894
31895 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31896 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31897 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31898 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31899 {0, }
31900 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31901 index 305e6aa..0143317 100644
31902 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31903 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31904 @@ -196,7 +196,7 @@ struct pvr2_hdw {
31905
31906 /* I2C stuff */
31907 struct i2c_adapter i2c_adap;
31908 - struct i2c_algorithm i2c_algo;
31909 + i2c_algorithm_no_const i2c_algo;
31910 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31911 int i2c_cx25840_hack_state;
31912 int i2c_linked;
31913 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31914 index a0895bf..b7ebb1b 100644
31915 --- a/drivers/media/video/timblogiw.c
31916 +++ b/drivers/media/video/timblogiw.c
31917 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31918
31919 /* Platform device functions */
31920
31921 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31922 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31923 .vidioc_querycap = timblogiw_querycap,
31924 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31925 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31926 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31927 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31928 };
31929
31930 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31931 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31932 .owner = THIS_MODULE,
31933 .open = timblogiw_open,
31934 .release = timblogiw_close,
31935 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31936 index e9c6a60..daf6a33 100644
31937 --- a/drivers/message/fusion/mptbase.c
31938 +++ b/drivers/message/fusion/mptbase.c
31939 @@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
31940 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31941 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31942
31943 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31944 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
31945 +#else
31946 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31947 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31948 +#endif
31949 +
31950 /*
31951 * Rounding UP to nearest 4-kB boundary here...
31952 */
31953 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31954 index 9d95042..b808101 100644
31955 --- a/drivers/message/fusion/mptsas.c
31956 +++ b/drivers/message/fusion/mptsas.c
31957 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
31958 return 0;
31959 }
31960
31961 +static inline void
31962 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31963 +{
31964 + if (phy_info->port_details) {
31965 + phy_info->port_details->rphy = rphy;
31966 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31967 + ioc->name, rphy));
31968 + }
31969 +
31970 + if (rphy) {
31971 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31972 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31973 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31974 + ioc->name, rphy, rphy->dev.release));
31975 + }
31976 +}
31977 +
31978 /* no mutex */
31979 static void
31980 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31981 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
31982 return NULL;
31983 }
31984
31985 -static inline void
31986 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31987 -{
31988 - if (phy_info->port_details) {
31989 - phy_info->port_details->rphy = rphy;
31990 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31991 - ioc->name, rphy));
31992 - }
31993 -
31994 - if (rphy) {
31995 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31996 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31997 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31998 - ioc->name, rphy, rphy->dev.release));
31999 - }
32000 -}
32001 -
32002 static inline struct sas_port *
32003 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32004 {
32005 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
32006 index 0c3ced7..1fe34ec 100644
32007 --- a/drivers/message/fusion/mptscsih.c
32008 +++ b/drivers/message/fusion/mptscsih.c
32009 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32010
32011 h = shost_priv(SChost);
32012
32013 - if (h) {
32014 - if (h->info_kbuf == NULL)
32015 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32016 - return h->info_kbuf;
32017 - h->info_kbuf[0] = '\0';
32018 + if (!h)
32019 + return NULL;
32020
32021 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32022 - h->info_kbuf[size-1] = '\0';
32023 - }
32024 + if (h->info_kbuf == NULL)
32025 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32026 + return h->info_kbuf;
32027 + h->info_kbuf[0] = '\0';
32028 +
32029 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32030 + h->info_kbuf[size-1] = '\0';
32031
32032 return h->info_kbuf;
32033 }
32034 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
32035 index 07dbeaf..5533142 100644
32036 --- a/drivers/message/i2o/i2o_proc.c
32037 +++ b/drivers/message/i2o/i2o_proc.c
32038 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
32039 "Array Controller Device"
32040 };
32041
32042 -static char *chtostr(u8 * chars, int n)
32043 -{
32044 - char tmp[256];
32045 - tmp[0] = 0;
32046 - return strncat(tmp, (char *)chars, n);
32047 -}
32048 -
32049 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32050 char *group)
32051 {
32052 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
32053
32054 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32055 seq_printf(seq, "%-#8x", ddm_table.module_id);
32056 - seq_printf(seq, "%-29s",
32057 - chtostr(ddm_table.module_name_version, 28));
32058 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32059 seq_printf(seq, "%9d ", ddm_table.data_size);
32060 seq_printf(seq, "%8d", ddm_table.code_size);
32061
32062 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
32063
32064 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32065 seq_printf(seq, "%-#8x", dst->module_id);
32066 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32067 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32068 + seq_printf(seq, "%-.28s", dst->module_name_version);
32069 + seq_printf(seq, "%-.8s", dst->date);
32070 seq_printf(seq, "%8d ", dst->module_size);
32071 seq_printf(seq, "%8d ", dst->mpb_size);
32072 seq_printf(seq, "0x%04x", dst->module_flags);
32073 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
32074 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32075 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32076 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32077 - seq_printf(seq, "Vendor info : %s\n",
32078 - chtostr((u8 *) (work32 + 2), 16));
32079 - seq_printf(seq, "Product info : %s\n",
32080 - chtostr((u8 *) (work32 + 6), 16));
32081 - seq_printf(seq, "Description : %s\n",
32082 - chtostr((u8 *) (work32 + 10), 16));
32083 - seq_printf(seq, "Product rev. : %s\n",
32084 - chtostr((u8 *) (work32 + 14), 8));
32085 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32086 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32087 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32088 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32089
32090 seq_printf(seq, "Serial number : ");
32091 print_serial_number(seq, (u8 *) (work32 + 16),
32092 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
32093 }
32094
32095 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32096 - seq_printf(seq, "Module name : %s\n",
32097 - chtostr(result.module_name, 24));
32098 - seq_printf(seq, "Module revision : %s\n",
32099 - chtostr(result.module_rev, 8));
32100 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
32101 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32102
32103 seq_printf(seq, "Serial number : ");
32104 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32105 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
32106 return 0;
32107 }
32108
32109 - seq_printf(seq, "Device name : %s\n",
32110 - chtostr(result.device_name, 64));
32111 - seq_printf(seq, "Service name : %s\n",
32112 - chtostr(result.service_name, 64));
32113 - seq_printf(seq, "Physical name : %s\n",
32114 - chtostr(result.physical_location, 64));
32115 - seq_printf(seq, "Instance number : %s\n",
32116 - chtostr(result.instance_number, 4));
32117 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
32118 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
32119 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32120 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32121
32122 return 0;
32123 }
32124 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
32125 index a8c08f3..155fe3d 100644
32126 --- a/drivers/message/i2o/iop.c
32127 +++ b/drivers/message/i2o/iop.c
32128 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
32129
32130 spin_lock_irqsave(&c->context_list_lock, flags);
32131
32132 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32133 - atomic_inc(&c->context_list_counter);
32134 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32135 + atomic_inc_unchecked(&c->context_list_counter);
32136
32137 - entry->context = atomic_read(&c->context_list_counter);
32138 + entry->context = atomic_read_unchecked(&c->context_list_counter);
32139
32140 list_add(&entry->list, &c->context_list);
32141
32142 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
32143
32144 #if BITS_PER_LONG == 64
32145 spin_lock_init(&c->context_list_lock);
32146 - atomic_set(&c->context_list_counter, 0);
32147 + atomic_set_unchecked(&c->context_list_counter, 0);
32148 INIT_LIST_HEAD(&c->context_list);
32149 #endif
32150
32151 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
32152 index 7ce65f4..e66e9bc 100644
32153 --- a/drivers/mfd/abx500-core.c
32154 +++ b/drivers/mfd/abx500-core.c
32155 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
32156
32157 struct abx500_device_entry {
32158 struct list_head list;
32159 - struct abx500_ops ops;
32160 + abx500_ops_no_const ops;
32161 struct device *dev;
32162 };
32163
32164 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
32165 index 5c2a06a..8fa077c 100644
32166 --- a/drivers/mfd/janz-cmodio.c
32167 +++ b/drivers/mfd/janz-cmodio.c
32168 @@ -13,6 +13,7 @@
32169
32170 #include <linux/kernel.h>
32171 #include <linux/module.h>
32172 +#include <linux/slab.h>
32173 #include <linux/init.h>
32174 #include <linux/pci.h>
32175 #include <linux/interrupt.h>
32176 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
32177 index 29d12a7..f900ba4 100644
32178 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
32179 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
32180 @@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
32181 * the lid is closed. This leads to interrupts as soon as a little move
32182 * is done.
32183 */
32184 - atomic_inc(&lis3->count);
32185 + atomic_inc_unchecked(&lis3->count);
32186
32187 wake_up_interruptible(&lis3->misc_wait);
32188 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
32189 @@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
32190 if (lis3->pm_dev)
32191 pm_runtime_get_sync(lis3->pm_dev);
32192
32193 - atomic_set(&lis3->count, 0);
32194 + atomic_set_unchecked(&lis3->count, 0);
32195 return 0;
32196 }
32197
32198 @@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
32199 add_wait_queue(&lis3->misc_wait, &wait);
32200 while (true) {
32201 set_current_state(TASK_INTERRUPTIBLE);
32202 - data = atomic_xchg(&lis3->count, 0);
32203 + data = atomic_xchg_unchecked(&lis3->count, 0);
32204 if (data)
32205 break;
32206
32207 @@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
32208 struct lis3lv02d, miscdev);
32209
32210 poll_wait(file, &lis3->misc_wait, wait);
32211 - if (atomic_read(&lis3->count))
32212 + if (atomic_read_unchecked(&lis3->count))
32213 return POLLIN | POLLRDNORM;
32214 return 0;
32215 }
32216 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
32217 index 2b1482a..5d33616 100644
32218 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
32219 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
32220 @@ -266,7 +266,7 @@ struct lis3lv02d {
32221 struct input_polled_dev *idev; /* input device */
32222 struct platform_device *pdev; /* platform device */
32223 struct regulator_bulk_data regulators[2];
32224 - atomic_t count; /* interrupt count after last read */
32225 + atomic_unchecked_t count; /* interrupt count after last read */
32226 union axis_conversion ac; /* hw -> logical axis */
32227 int mapped_btns[3];
32228
32229 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
32230 index 2f30bad..c4c13d0 100644
32231 --- a/drivers/misc/sgi-gru/gruhandles.c
32232 +++ b/drivers/misc/sgi-gru/gruhandles.c
32233 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32234 unsigned long nsec;
32235
32236 nsec = CLKS2NSEC(clks);
32237 - atomic_long_inc(&mcs_op_statistics[op].count);
32238 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
32239 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32240 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
32241 if (mcs_op_statistics[op].max < nsec)
32242 mcs_op_statistics[op].max = nsec;
32243 }
32244 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
32245 index 7768b87..f8aac38 100644
32246 --- a/drivers/misc/sgi-gru/gruprocfs.c
32247 +++ b/drivers/misc/sgi-gru/gruprocfs.c
32248 @@ -32,9 +32,9 @@
32249
32250 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32251
32252 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32253 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32254 {
32255 - unsigned long val = atomic_long_read(v);
32256 + unsigned long val = atomic_long_read_unchecked(v);
32257
32258 seq_printf(s, "%16lu %s\n", val, id);
32259 }
32260 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
32261
32262 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
32263 for (op = 0; op < mcsop_last; op++) {
32264 - count = atomic_long_read(&mcs_op_statistics[op].count);
32265 - total = atomic_long_read(&mcs_op_statistics[op].total);
32266 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32267 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32268 max = mcs_op_statistics[op].max;
32269 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32270 count ? total / count : 0, max);
32271 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
32272 index 5c3ce24..4915ccb 100644
32273 --- a/drivers/misc/sgi-gru/grutables.h
32274 +++ b/drivers/misc/sgi-gru/grutables.h
32275 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
32276 * GRU statistics.
32277 */
32278 struct gru_stats_s {
32279 - atomic_long_t vdata_alloc;
32280 - atomic_long_t vdata_free;
32281 - atomic_long_t gts_alloc;
32282 - atomic_long_t gts_free;
32283 - atomic_long_t gms_alloc;
32284 - atomic_long_t gms_free;
32285 - atomic_long_t gts_double_allocate;
32286 - atomic_long_t assign_context;
32287 - atomic_long_t assign_context_failed;
32288 - atomic_long_t free_context;
32289 - atomic_long_t load_user_context;
32290 - atomic_long_t load_kernel_context;
32291 - atomic_long_t lock_kernel_context;
32292 - atomic_long_t unlock_kernel_context;
32293 - atomic_long_t steal_user_context;
32294 - atomic_long_t steal_kernel_context;
32295 - atomic_long_t steal_context_failed;
32296 - atomic_long_t nopfn;
32297 - atomic_long_t asid_new;
32298 - atomic_long_t asid_next;
32299 - atomic_long_t asid_wrap;
32300 - atomic_long_t asid_reuse;
32301 - atomic_long_t intr;
32302 - atomic_long_t intr_cbr;
32303 - atomic_long_t intr_tfh;
32304 - atomic_long_t intr_spurious;
32305 - atomic_long_t intr_mm_lock_failed;
32306 - atomic_long_t call_os;
32307 - atomic_long_t call_os_wait_queue;
32308 - atomic_long_t user_flush_tlb;
32309 - atomic_long_t user_unload_context;
32310 - atomic_long_t user_exception;
32311 - atomic_long_t set_context_option;
32312 - atomic_long_t check_context_retarget_intr;
32313 - atomic_long_t check_context_unload;
32314 - atomic_long_t tlb_dropin;
32315 - atomic_long_t tlb_preload_page;
32316 - atomic_long_t tlb_dropin_fail_no_asid;
32317 - atomic_long_t tlb_dropin_fail_upm;
32318 - atomic_long_t tlb_dropin_fail_invalid;
32319 - atomic_long_t tlb_dropin_fail_range_active;
32320 - atomic_long_t tlb_dropin_fail_idle;
32321 - atomic_long_t tlb_dropin_fail_fmm;
32322 - atomic_long_t tlb_dropin_fail_no_exception;
32323 - atomic_long_t tfh_stale_on_fault;
32324 - atomic_long_t mmu_invalidate_range;
32325 - atomic_long_t mmu_invalidate_page;
32326 - atomic_long_t flush_tlb;
32327 - atomic_long_t flush_tlb_gru;
32328 - atomic_long_t flush_tlb_gru_tgh;
32329 - atomic_long_t flush_tlb_gru_zero_asid;
32330 + atomic_long_unchecked_t vdata_alloc;
32331 + atomic_long_unchecked_t vdata_free;
32332 + atomic_long_unchecked_t gts_alloc;
32333 + atomic_long_unchecked_t gts_free;
32334 + atomic_long_unchecked_t gms_alloc;
32335 + atomic_long_unchecked_t gms_free;
32336 + atomic_long_unchecked_t gts_double_allocate;
32337 + atomic_long_unchecked_t assign_context;
32338 + atomic_long_unchecked_t assign_context_failed;
32339 + atomic_long_unchecked_t free_context;
32340 + atomic_long_unchecked_t load_user_context;
32341 + atomic_long_unchecked_t load_kernel_context;
32342 + atomic_long_unchecked_t lock_kernel_context;
32343 + atomic_long_unchecked_t unlock_kernel_context;
32344 + atomic_long_unchecked_t steal_user_context;
32345 + atomic_long_unchecked_t steal_kernel_context;
32346 + atomic_long_unchecked_t steal_context_failed;
32347 + atomic_long_unchecked_t nopfn;
32348 + atomic_long_unchecked_t asid_new;
32349 + atomic_long_unchecked_t asid_next;
32350 + atomic_long_unchecked_t asid_wrap;
32351 + atomic_long_unchecked_t asid_reuse;
32352 + atomic_long_unchecked_t intr;
32353 + atomic_long_unchecked_t intr_cbr;
32354 + atomic_long_unchecked_t intr_tfh;
32355 + atomic_long_unchecked_t intr_spurious;
32356 + atomic_long_unchecked_t intr_mm_lock_failed;
32357 + atomic_long_unchecked_t call_os;
32358 + atomic_long_unchecked_t call_os_wait_queue;
32359 + atomic_long_unchecked_t user_flush_tlb;
32360 + atomic_long_unchecked_t user_unload_context;
32361 + atomic_long_unchecked_t user_exception;
32362 + atomic_long_unchecked_t set_context_option;
32363 + atomic_long_unchecked_t check_context_retarget_intr;
32364 + atomic_long_unchecked_t check_context_unload;
32365 + atomic_long_unchecked_t tlb_dropin;
32366 + atomic_long_unchecked_t tlb_preload_page;
32367 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32368 + atomic_long_unchecked_t tlb_dropin_fail_upm;
32369 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
32370 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
32371 + atomic_long_unchecked_t tlb_dropin_fail_idle;
32372 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
32373 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32374 + atomic_long_unchecked_t tfh_stale_on_fault;
32375 + atomic_long_unchecked_t mmu_invalidate_range;
32376 + atomic_long_unchecked_t mmu_invalidate_page;
32377 + atomic_long_unchecked_t flush_tlb;
32378 + atomic_long_unchecked_t flush_tlb_gru;
32379 + atomic_long_unchecked_t flush_tlb_gru_tgh;
32380 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32381
32382 - atomic_long_t copy_gpa;
32383 - atomic_long_t read_gpa;
32384 + atomic_long_unchecked_t copy_gpa;
32385 + atomic_long_unchecked_t read_gpa;
32386
32387 - atomic_long_t mesq_receive;
32388 - atomic_long_t mesq_receive_none;
32389 - atomic_long_t mesq_send;
32390 - atomic_long_t mesq_send_failed;
32391 - atomic_long_t mesq_noop;
32392 - atomic_long_t mesq_send_unexpected_error;
32393 - atomic_long_t mesq_send_lb_overflow;
32394 - atomic_long_t mesq_send_qlimit_reached;
32395 - atomic_long_t mesq_send_amo_nacked;
32396 - atomic_long_t mesq_send_put_nacked;
32397 - atomic_long_t mesq_page_overflow;
32398 - atomic_long_t mesq_qf_locked;
32399 - atomic_long_t mesq_qf_noop_not_full;
32400 - atomic_long_t mesq_qf_switch_head_failed;
32401 - atomic_long_t mesq_qf_unexpected_error;
32402 - atomic_long_t mesq_noop_unexpected_error;
32403 - atomic_long_t mesq_noop_lb_overflow;
32404 - atomic_long_t mesq_noop_qlimit_reached;
32405 - atomic_long_t mesq_noop_amo_nacked;
32406 - atomic_long_t mesq_noop_put_nacked;
32407 - atomic_long_t mesq_noop_page_overflow;
32408 + atomic_long_unchecked_t mesq_receive;
32409 + atomic_long_unchecked_t mesq_receive_none;
32410 + atomic_long_unchecked_t mesq_send;
32411 + atomic_long_unchecked_t mesq_send_failed;
32412 + atomic_long_unchecked_t mesq_noop;
32413 + atomic_long_unchecked_t mesq_send_unexpected_error;
32414 + atomic_long_unchecked_t mesq_send_lb_overflow;
32415 + atomic_long_unchecked_t mesq_send_qlimit_reached;
32416 + atomic_long_unchecked_t mesq_send_amo_nacked;
32417 + atomic_long_unchecked_t mesq_send_put_nacked;
32418 + atomic_long_unchecked_t mesq_page_overflow;
32419 + atomic_long_unchecked_t mesq_qf_locked;
32420 + atomic_long_unchecked_t mesq_qf_noop_not_full;
32421 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
32422 + atomic_long_unchecked_t mesq_qf_unexpected_error;
32423 + atomic_long_unchecked_t mesq_noop_unexpected_error;
32424 + atomic_long_unchecked_t mesq_noop_lb_overflow;
32425 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
32426 + atomic_long_unchecked_t mesq_noop_amo_nacked;
32427 + atomic_long_unchecked_t mesq_noop_put_nacked;
32428 + atomic_long_unchecked_t mesq_noop_page_overflow;
32429
32430 };
32431
32432 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32433 tghop_invalidate, mcsop_last};
32434
32435 struct mcs_op_statistic {
32436 - atomic_long_t count;
32437 - atomic_long_t total;
32438 + atomic_long_unchecked_t count;
32439 + atomic_long_unchecked_t total;
32440 unsigned long max;
32441 };
32442
32443 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32444
32445 #define STAT(id) do { \
32446 if (gru_options & OPT_STATS) \
32447 - atomic_long_inc(&gru_stats.id); \
32448 + atomic_long_inc_unchecked(&gru_stats.id); \
32449 } while (0)
32450
32451 #ifdef CONFIG_SGI_GRU_DEBUG
32452 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32453 index 851b2f2..a4ec097 100644
32454 --- a/drivers/misc/sgi-xp/xp.h
32455 +++ b/drivers/misc/sgi-xp/xp.h
32456 @@ -289,7 +289,7 @@ struct xpc_interface {
32457 xpc_notify_func, void *);
32458 void (*received) (short, int, void *);
32459 enum xp_retval (*partid_to_nasids) (short, void *);
32460 -};
32461 +} __no_const;
32462
32463 extern struct xpc_interface xpc_interface;
32464
32465 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32466 index b94d5f7..7f494c5 100644
32467 --- a/drivers/misc/sgi-xp/xpc.h
32468 +++ b/drivers/misc/sgi-xp/xpc.h
32469 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
32470 void (*received_payload) (struct xpc_channel *, void *);
32471 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32472 };
32473 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32474
32475 /* struct xpc_partition act_state values (for XPC HB) */
32476
32477 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32478 /* found in xpc_main.c */
32479 extern struct device *xpc_part;
32480 extern struct device *xpc_chan;
32481 -extern struct xpc_arch_operations xpc_arch_ops;
32482 +extern xpc_arch_operations_no_const xpc_arch_ops;
32483 extern int xpc_disengage_timelimit;
32484 extern int xpc_disengage_timedout;
32485 extern int xpc_activate_IRQ_rcvd;
32486 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32487 index 8d082b4..aa749ae 100644
32488 --- a/drivers/misc/sgi-xp/xpc_main.c
32489 +++ b/drivers/misc/sgi-xp/xpc_main.c
32490 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32491 .notifier_call = xpc_system_die,
32492 };
32493
32494 -struct xpc_arch_operations xpc_arch_ops;
32495 +xpc_arch_operations_no_const xpc_arch_ops;
32496
32497 /*
32498 * Timer function to enforce the timelimit on the partition disengage.
32499 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32500 index 6878a94..fe5c5f1 100644
32501 --- a/drivers/mmc/host/sdhci-pci.c
32502 +++ b/drivers/mmc/host/sdhci-pci.c
32503 @@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32504 .probe = via_probe,
32505 };
32506
32507 -static const struct pci_device_id pci_ids[] __devinitdata = {
32508 +static const struct pci_device_id pci_ids[] __devinitconst = {
32509 {
32510 .vendor = PCI_VENDOR_ID_RICOH,
32511 .device = PCI_DEVICE_ID_RICOH_R5C822,
32512 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32513 index e9fad91..0a7a16a 100644
32514 --- a/drivers/mtd/devices/doc2000.c
32515 +++ b/drivers/mtd/devices/doc2000.c
32516 @@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32517
32518 /* The ECC will not be calculated correctly if less than 512 is written */
32519 /* DBB-
32520 - if (len != 0x200 && eccbuf)
32521 + if (len != 0x200)
32522 printk(KERN_WARNING
32523 "ECC needs a full sector write (adr: %lx size %lx)\n",
32524 (long) to, (long) len);
32525 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32526 index a3f7a27..234016e 100644
32527 --- a/drivers/mtd/devices/doc2001.c
32528 +++ b/drivers/mtd/devices/doc2001.c
32529 @@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32530 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32531
32532 /* Don't allow read past end of device */
32533 - if (from >= this->totlen)
32534 + if (from >= this->totlen || !len)
32535 return -EINVAL;
32536
32537 /* Don't allow a single read to cross a 512-byte block boundary */
32538 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32539 index 3984d48..28aa897 100644
32540 --- a/drivers/mtd/nand/denali.c
32541 +++ b/drivers/mtd/nand/denali.c
32542 @@ -26,6 +26,7 @@
32543 #include <linux/pci.h>
32544 #include <linux/mtd/mtd.h>
32545 #include <linux/module.h>
32546 +#include <linux/slab.h>
32547
32548 #include "denali.h"
32549
32550 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32551 index ac40925..483b753 100644
32552 --- a/drivers/mtd/nftlmount.c
32553 +++ b/drivers/mtd/nftlmount.c
32554 @@ -24,6 +24,7 @@
32555 #include <asm/errno.h>
32556 #include <linux/delay.h>
32557 #include <linux/slab.h>
32558 +#include <linux/sched.h>
32559 #include <linux/mtd/mtd.h>
32560 #include <linux/mtd/nand.h>
32561 #include <linux/mtd/nftl.h>
32562 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32563 index 6c3fb5a..c542a81 100644
32564 --- a/drivers/mtd/ubi/build.c
32565 +++ b/drivers/mtd/ubi/build.c
32566 @@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32567 static int __init bytes_str_to_int(const char *str)
32568 {
32569 char *endp;
32570 - unsigned long result;
32571 + unsigned long result, scale = 1;
32572
32573 result = simple_strtoul(str, &endp, 0);
32574 if (str == endp || result >= INT_MAX) {
32575 @@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32576
32577 switch (*endp) {
32578 case 'G':
32579 - result *= 1024;
32580 + scale *= 1024;
32581 case 'M':
32582 - result *= 1024;
32583 + scale *= 1024;
32584 case 'K':
32585 - result *= 1024;
32586 + scale *= 1024;
32587 if (endp[1] == 'i' && endp[2] == 'B')
32588 endp += 2;
32589 case '\0':
32590 @@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32591 return -EINVAL;
32592 }
32593
32594 - return result;
32595 + if ((intoverflow_t)result*scale >= INT_MAX) {
32596 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32597 + str);
32598 + return -EINVAL;
32599 + }
32600 +
32601 + return result*scale;
32602 }
32603
32604 /**
32605 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
32606 index 1feae59..c2a61d2 100644
32607 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
32608 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
32609 @@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32610 */
32611
32612 #define ATL2_PARAM(X, desc) \
32613 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32614 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32615 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32616 MODULE_PARM_DESC(X, desc);
32617 #else
32618 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32619 index 9a517c2..a50cfcb 100644
32620 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32621 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32622 @@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32623
32624 int (*wait_comp)(struct bnx2x *bp,
32625 struct bnx2x_rx_mode_ramrod_params *p);
32626 -};
32627 +} __no_const;
32628
32629 /********************** Set multicast group ***********************************/
32630
32631 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
32632 index 94b4bd0..73c02de 100644
32633 --- a/drivers/net/ethernet/broadcom/tg3.h
32634 +++ b/drivers/net/ethernet/broadcom/tg3.h
32635 @@ -134,6 +134,7 @@
32636 #define CHIPREV_ID_5750_A0 0x4000
32637 #define CHIPREV_ID_5750_A1 0x4001
32638 #define CHIPREV_ID_5750_A3 0x4003
32639 +#define CHIPREV_ID_5750_C1 0x4201
32640 #define CHIPREV_ID_5750_C2 0x4202
32641 #define CHIPREV_ID_5752_A0_HW 0x5000
32642 #define CHIPREV_ID_5752_A0 0x6000
32643 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32644 index c5f5479..2e8c260 100644
32645 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32646 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32647 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
32648 */
32649 struct l2t_skb_cb {
32650 arp_failure_handler_func arp_failure_handler;
32651 -};
32652 +} __no_const;
32653
32654 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
32655
32656 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
32657 index 871bcaa..4043505 100644
32658 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
32659 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
32660 @@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32661 for (i=0; i<ETH_ALEN; i++) {
32662 tmp.addr[i] = dev->dev_addr[i];
32663 }
32664 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32665 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32666 break;
32667
32668 case DE4X5_SET_HWADDR: /* Set the hardware address */
32669 @@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32670 spin_lock_irqsave(&lp->lock, flags);
32671 memcpy(&statbuf, &lp->pktStats, ioc->len);
32672 spin_unlock_irqrestore(&lp->lock, flags);
32673 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32674 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32675 return -EFAULT;
32676 break;
32677 }
32678 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
32679 index 14d5b61..1398636 100644
32680 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
32681 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
32682 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
32683 {NULL}};
32684
32685
32686 -static const char *block_name[] __devinitdata = {
32687 +static const char *block_name[] __devinitconst = {
32688 "21140 non-MII",
32689 "21140 MII PHY",
32690 "21142 Serial PHY",
32691 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
32692 index 4d01219..b58d26d 100644
32693 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
32694 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
32695 @@ -236,7 +236,7 @@ struct pci_id_info {
32696 int drv_flags; /* Driver use, intended as capability flags. */
32697 };
32698
32699 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32700 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32701 { /* Sometime a Level-One switch card. */
32702 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32703 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32704 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
32705 index dcd7f7a..ecb7fb3 100644
32706 --- a/drivers/net/ethernet/dlink/sundance.c
32707 +++ b/drivers/net/ethernet/dlink/sundance.c
32708 @@ -218,7 +218,7 @@ enum {
32709 struct pci_id_info {
32710 const char *name;
32711 };
32712 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32713 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32714 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32715 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32716 {"D-Link DFE-580TX 4 port Server Adapter"},
32717 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
32718 index bf266a0..e024af7 100644
32719 --- a/drivers/net/ethernet/emulex/benet/be_main.c
32720 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
32721 @@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
32722
32723 if (wrapped)
32724 newacc += 65536;
32725 - ACCESS_ONCE(*acc) = newacc;
32726 + ACCESS_ONCE_RW(*acc) = newacc;
32727 }
32728
32729 void be_parse_stats(struct be_adapter *adapter)
32730 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
32731 index 61d2bdd..7f1154a 100644
32732 --- a/drivers/net/ethernet/fealnx.c
32733 +++ b/drivers/net/ethernet/fealnx.c
32734 @@ -150,7 +150,7 @@ struct chip_info {
32735 int flags;
32736 };
32737
32738 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
32739 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
32740 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32741 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
32742 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32743 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32744 index e1159e5..e18684d 100644
32745 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32746 +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32747 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
32748 {
32749 struct e1000_hw *hw = &adapter->hw;
32750 struct e1000_mac_info *mac = &hw->mac;
32751 - struct e1000_mac_operations *func = &mac->ops;
32752 + e1000_mac_operations_no_const *func = &mac->ops;
32753
32754 /* Set media type */
32755 switch (adapter->pdev->device) {
32756 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
32757 index a3e65fd..f451444 100644
32758 --- a/drivers/net/ethernet/intel/e1000e/82571.c
32759 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
32760 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
32761 {
32762 struct e1000_hw *hw = &adapter->hw;
32763 struct e1000_mac_info *mac = &hw->mac;
32764 - struct e1000_mac_operations *func = &mac->ops;
32765 + e1000_mac_operations_no_const *func = &mac->ops;
32766 u32 swsm = 0;
32767 u32 swsm2 = 0;
32768 bool force_clear_smbi = false;
32769 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
32770 index 2967039..ca8c40c 100644
32771 --- a/drivers/net/ethernet/intel/e1000e/hw.h
32772 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
32773 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
32774 void (*write_vfta)(struct e1000_hw *, u32, u32);
32775 s32 (*read_mac_addr)(struct e1000_hw *);
32776 };
32777 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32778
32779 /*
32780 * When to use various PHY register access functions:
32781 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
32782 void (*power_up)(struct e1000_hw *);
32783 void (*power_down)(struct e1000_hw *);
32784 };
32785 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32786
32787 /* Function pointers for the NVM. */
32788 struct e1000_nvm_operations {
32789 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
32790 s32 (*validate)(struct e1000_hw *);
32791 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32792 };
32793 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32794
32795 struct e1000_mac_info {
32796 - struct e1000_mac_operations ops;
32797 + e1000_mac_operations_no_const ops;
32798 u8 addr[ETH_ALEN];
32799 u8 perm_addr[ETH_ALEN];
32800
32801 @@ -872,7 +875,7 @@ struct e1000_mac_info {
32802 };
32803
32804 struct e1000_phy_info {
32805 - struct e1000_phy_operations ops;
32806 + e1000_phy_operations_no_const ops;
32807
32808 enum e1000_phy_type type;
32809
32810 @@ -906,7 +909,7 @@ struct e1000_phy_info {
32811 };
32812
32813 struct e1000_nvm_info {
32814 - struct e1000_nvm_operations ops;
32815 + e1000_nvm_operations_no_const ops;
32816
32817 enum e1000_nvm_type type;
32818 enum e1000_nvm_override override;
32819 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
32820 index 4519a13..f97fcd0 100644
32821 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
32822 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
32823 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
32824 s32 (*read_mac_addr)(struct e1000_hw *);
32825 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
32826 };
32827 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32828
32829 struct e1000_phy_operations {
32830 s32 (*acquire)(struct e1000_hw *);
32831 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
32832 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32833 s32 (*write_reg)(struct e1000_hw *, u32, u16);
32834 };
32835 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32836
32837 struct e1000_nvm_operations {
32838 s32 (*acquire)(struct e1000_hw *);
32839 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
32840 s32 (*update)(struct e1000_hw *);
32841 s32 (*validate)(struct e1000_hw *);
32842 };
32843 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32844
32845 struct e1000_info {
32846 s32 (*get_invariants)(struct e1000_hw *);
32847 @@ -350,7 +353,7 @@ struct e1000_info {
32848 extern const struct e1000_info e1000_82575_info;
32849
32850 struct e1000_mac_info {
32851 - struct e1000_mac_operations ops;
32852 + e1000_mac_operations_no_const ops;
32853
32854 u8 addr[6];
32855 u8 perm_addr[6];
32856 @@ -388,7 +391,7 @@ struct e1000_mac_info {
32857 };
32858
32859 struct e1000_phy_info {
32860 - struct e1000_phy_operations ops;
32861 + e1000_phy_operations_no_const ops;
32862
32863 enum e1000_phy_type type;
32864
32865 @@ -423,7 +426,7 @@ struct e1000_phy_info {
32866 };
32867
32868 struct e1000_nvm_info {
32869 - struct e1000_nvm_operations ops;
32870 + e1000_nvm_operations_no_const ops;
32871 enum e1000_nvm_type type;
32872 enum e1000_nvm_override override;
32873
32874 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
32875 s32 (*check_for_ack)(struct e1000_hw *, u16);
32876 s32 (*check_for_rst)(struct e1000_hw *, u16);
32877 };
32878 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32879
32880 struct e1000_mbx_stats {
32881 u32 msgs_tx;
32882 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
32883 };
32884
32885 struct e1000_mbx_info {
32886 - struct e1000_mbx_operations ops;
32887 + e1000_mbx_operations_no_const ops;
32888 struct e1000_mbx_stats stats;
32889 u32 timeout;
32890 u32 usec_delay;
32891 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
32892 index d7ed58f..64cde36 100644
32893 --- a/drivers/net/ethernet/intel/igbvf/vf.h
32894 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
32895 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
32896 s32 (*read_mac_addr)(struct e1000_hw *);
32897 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
32898 };
32899 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32900
32901 struct e1000_mac_info {
32902 - struct e1000_mac_operations ops;
32903 + e1000_mac_operations_no_const ops;
32904 u8 addr[6];
32905 u8 perm_addr[6];
32906
32907 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
32908 s32 (*check_for_ack)(struct e1000_hw *);
32909 s32 (*check_for_rst)(struct e1000_hw *);
32910 };
32911 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32912
32913 struct e1000_mbx_stats {
32914 u32 msgs_tx;
32915 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
32916 };
32917
32918 struct e1000_mbx_info {
32919 - struct e1000_mbx_operations ops;
32920 + e1000_mbx_operations_no_const ops;
32921 struct e1000_mbx_stats stats;
32922 u32 timeout;
32923 u32 usec_delay;
32924 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32925 index 6c5cca8..de8ef63 100644
32926 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32927 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32928 @@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
32929 s32 (*update_checksum)(struct ixgbe_hw *);
32930 u16 (*calc_checksum)(struct ixgbe_hw *);
32931 };
32932 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
32933
32934 struct ixgbe_mac_operations {
32935 s32 (*init_hw)(struct ixgbe_hw *);
32936 @@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
32937 /* Manageability interface */
32938 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
32939 };
32940 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32941
32942 struct ixgbe_phy_operations {
32943 s32 (*identify)(struct ixgbe_hw *);
32944 @@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
32945 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
32946 s32 (*check_overtemp)(struct ixgbe_hw *);
32947 };
32948 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
32949
32950 struct ixgbe_eeprom_info {
32951 - struct ixgbe_eeprom_operations ops;
32952 + ixgbe_eeprom_operations_no_const ops;
32953 enum ixgbe_eeprom_type type;
32954 u32 semaphore_delay;
32955 u16 word_size;
32956 @@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
32957
32958 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
32959 struct ixgbe_mac_info {
32960 - struct ixgbe_mac_operations ops;
32961 + ixgbe_mac_operations_no_const ops;
32962 enum ixgbe_mac_type type;
32963 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32964 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32965 @@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
32966 };
32967
32968 struct ixgbe_phy_info {
32969 - struct ixgbe_phy_operations ops;
32970 + ixgbe_phy_operations_no_const ops;
32971 struct mdio_if_info mdio;
32972 enum ixgbe_phy_type type;
32973 u32 id;
32974 @@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
32975 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
32976 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
32977 };
32978 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32979
32980 struct ixgbe_mbx_stats {
32981 u32 msgs_tx;
32982 @@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
32983 };
32984
32985 struct ixgbe_mbx_info {
32986 - struct ixgbe_mbx_operations ops;
32987 + ixgbe_mbx_operations_no_const ops;
32988 struct ixgbe_mbx_stats stats;
32989 u32 timeout;
32990 u32 usec_delay;
32991 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
32992 index 10306b4..28df758 100644
32993 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
32994 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
32995 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
32996 s32 (*clear_vfta)(struct ixgbe_hw *);
32997 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
32998 };
32999 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33000
33001 enum ixgbe_mac_type {
33002 ixgbe_mac_unknown = 0,
33003 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
33004 };
33005
33006 struct ixgbe_mac_info {
33007 - struct ixgbe_mac_operations ops;
33008 + ixgbe_mac_operations_no_const ops;
33009 u8 addr[6];
33010 u8 perm_addr[6];
33011
33012 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
33013 s32 (*check_for_ack)(struct ixgbe_hw *);
33014 s32 (*check_for_rst)(struct ixgbe_hw *);
33015 };
33016 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
33017
33018 struct ixgbe_mbx_stats {
33019 u32 msgs_tx;
33020 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
33021 };
33022
33023 struct ixgbe_mbx_info {
33024 - struct ixgbe_mbx_operations ops;
33025 + ixgbe_mbx_operations_no_const ops;
33026 struct ixgbe_mbx_stats stats;
33027 u32 timeout;
33028 u32 udelay;
33029 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
33030 index 94bbc85..78c12e6 100644
33031 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
33032 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
33033 @@ -40,6 +40,7 @@
33034 #include <linux/dma-mapping.h>
33035 #include <linux/slab.h>
33036 #include <linux/io-mapping.h>
33037 +#include <linux/sched.h>
33038
33039 #include <linux/mlx4/device.h>
33040 #include <linux/mlx4/doorbell.h>
33041 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
33042 index 5046a64..71ca936 100644
33043 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
33044 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
33045 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
33046 void (*link_down)(struct __vxge_hw_device *devh);
33047 void (*crit_err)(struct __vxge_hw_device *devh,
33048 enum vxge_hw_event type, u64 ext_data);
33049 -};
33050 +} __no_const;
33051
33052 /*
33053 * struct __vxge_hw_blockpool_entry - Block private data structure
33054 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
33055 index 4a518a3..936b334 100644
33056 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
33057 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
33058 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
33059 struct vxge_hw_mempool_dma *dma_object,
33060 u32 index,
33061 u32 is_last);
33062 -};
33063 +} __no_const;
33064
33065 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
33066 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
33067 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
33068 index c8f47f1..5da9840 100644
33069 --- a/drivers/net/ethernet/realtek/r8169.c
33070 +++ b/drivers/net/ethernet/realtek/r8169.c
33071 @@ -698,17 +698,17 @@ struct rtl8169_private {
33072 struct mdio_ops {
33073 void (*write)(void __iomem *, int, int);
33074 int (*read)(void __iomem *, int);
33075 - } mdio_ops;
33076 + } __no_const mdio_ops;
33077
33078 struct pll_power_ops {
33079 void (*down)(struct rtl8169_private *);
33080 void (*up)(struct rtl8169_private *);
33081 - } pll_power_ops;
33082 + } __no_const pll_power_ops;
33083
33084 struct jumbo_ops {
33085 void (*enable)(struct rtl8169_private *);
33086 void (*disable)(struct rtl8169_private *);
33087 - } jumbo_ops;
33088 + } __no_const jumbo_ops;
33089
33090 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
33091 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
33092 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
33093 index 1b4658c..a30dabb 100644
33094 --- a/drivers/net/ethernet/sis/sis190.c
33095 +++ b/drivers/net/ethernet/sis/sis190.c
33096 @@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
33097 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
33098 struct net_device *dev)
33099 {
33100 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
33101 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
33102 struct sis190_private *tp = netdev_priv(dev);
33103 struct pci_dev *isa_bridge;
33104 u8 reg, tmp8;
33105 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
33106 index edfa15d..002bfa9 100644
33107 --- a/drivers/net/ppp/ppp_generic.c
33108 +++ b/drivers/net/ppp/ppp_generic.c
33109 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33110 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
33111 struct ppp_stats stats;
33112 struct ppp_comp_stats cstats;
33113 - char *vers;
33114
33115 switch (cmd) {
33116 case SIOCGPPPSTATS:
33117 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33118 break;
33119
33120 case SIOCGPPPVER:
33121 - vers = PPP_VERSION;
33122 - if (copy_to_user(addr, vers, strlen(vers) + 1))
33123 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
33124 break;
33125 err = 0;
33126 break;
33127 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
33128 index 515f122..41dd273 100644
33129 --- a/drivers/net/tokenring/abyss.c
33130 +++ b/drivers/net/tokenring/abyss.c
33131 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
33132
33133 static int __init abyss_init (void)
33134 {
33135 - abyss_netdev_ops = tms380tr_netdev_ops;
33136 + pax_open_kernel();
33137 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33138
33139 - abyss_netdev_ops.ndo_open = abyss_open;
33140 - abyss_netdev_ops.ndo_stop = abyss_close;
33141 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
33142 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
33143 + pax_close_kernel();
33144
33145 return pci_register_driver(&abyss_driver);
33146 }
33147 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
33148 index 6153cfd..cf69c1c 100644
33149 --- a/drivers/net/tokenring/madgemc.c
33150 +++ b/drivers/net/tokenring/madgemc.c
33151 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
33152
33153 static int __init madgemc_init (void)
33154 {
33155 - madgemc_netdev_ops = tms380tr_netdev_ops;
33156 - madgemc_netdev_ops.ndo_open = madgemc_open;
33157 - madgemc_netdev_ops.ndo_stop = madgemc_close;
33158 + pax_open_kernel();
33159 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33160 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
33161 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
33162 + pax_close_kernel();
33163
33164 return mca_register_driver (&madgemc_driver);
33165 }
33166 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
33167 index 8d362e6..f91cc52 100644
33168 --- a/drivers/net/tokenring/proteon.c
33169 +++ b/drivers/net/tokenring/proteon.c
33170 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
33171 struct platform_device *pdev;
33172 int i, num = 0, err = 0;
33173
33174 - proteon_netdev_ops = tms380tr_netdev_ops;
33175 - proteon_netdev_ops.ndo_open = proteon_open;
33176 - proteon_netdev_ops.ndo_stop = tms380tr_close;
33177 + pax_open_kernel();
33178 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33179 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
33180 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
33181 + pax_close_kernel();
33182
33183 err = platform_driver_register(&proteon_driver);
33184 if (err)
33185 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
33186 index 46db5c5..37c1536 100644
33187 --- a/drivers/net/tokenring/skisa.c
33188 +++ b/drivers/net/tokenring/skisa.c
33189 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
33190 struct platform_device *pdev;
33191 int i, num = 0, err = 0;
33192
33193 - sk_isa_netdev_ops = tms380tr_netdev_ops;
33194 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
33195 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
33196 + pax_open_kernel();
33197 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33198 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
33199 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
33200 + pax_close_kernel();
33201
33202 err = platform_driver_register(&sk_isa_driver);
33203 if (err)
33204 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
33205 index 304fe78..db112fa 100644
33206 --- a/drivers/net/usb/hso.c
33207 +++ b/drivers/net/usb/hso.c
33208 @@ -71,7 +71,7 @@
33209 #include <asm/byteorder.h>
33210 #include <linux/serial_core.h>
33211 #include <linux/serial.h>
33212 -
33213 +#include <asm/local.h>
33214
33215 #define MOD_AUTHOR "Option Wireless"
33216 #define MOD_DESCRIPTION "USB High Speed Option driver"
33217 @@ -257,7 +257,7 @@ struct hso_serial {
33218
33219 /* from usb_serial_port */
33220 struct tty_struct *tty;
33221 - int open_count;
33222 + local_t open_count;
33223 spinlock_t serial_lock;
33224
33225 int (*write_data) (struct hso_serial *serial);
33226 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
33227 struct urb *urb;
33228
33229 urb = serial->rx_urb[0];
33230 - if (serial->open_count > 0) {
33231 + if (local_read(&serial->open_count) > 0) {
33232 count = put_rxbuf_data(urb, serial);
33233 if (count == -1)
33234 return;
33235 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
33236 DUMP1(urb->transfer_buffer, urb->actual_length);
33237
33238 /* Anyone listening? */
33239 - if (serial->open_count == 0)
33240 + if (local_read(&serial->open_count) == 0)
33241 return;
33242
33243 if (status == 0) {
33244 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33245 spin_unlock_irq(&serial->serial_lock);
33246
33247 /* check for port already opened, if not set the termios */
33248 - serial->open_count++;
33249 - if (serial->open_count == 1) {
33250 + if (local_inc_return(&serial->open_count) == 1) {
33251 serial->rx_state = RX_IDLE;
33252 /* Force default termio settings */
33253 _hso_serial_set_termios(tty, NULL);
33254 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33255 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
33256 if (result) {
33257 hso_stop_serial_device(serial->parent);
33258 - serial->open_count--;
33259 + local_dec(&serial->open_count);
33260 kref_put(&serial->parent->ref, hso_serial_ref_free);
33261 }
33262 } else {
33263 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
33264
33265 /* reset the rts and dtr */
33266 /* do the actual close */
33267 - serial->open_count--;
33268 + local_dec(&serial->open_count);
33269
33270 - if (serial->open_count <= 0) {
33271 - serial->open_count = 0;
33272 + if (local_read(&serial->open_count) <= 0) {
33273 + local_set(&serial->open_count, 0);
33274 spin_lock_irq(&serial->serial_lock);
33275 if (serial->tty == tty) {
33276 serial->tty->driver_data = NULL;
33277 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
33278
33279 /* the actual setup */
33280 spin_lock_irqsave(&serial->serial_lock, flags);
33281 - if (serial->open_count)
33282 + if (local_read(&serial->open_count))
33283 _hso_serial_set_termios(tty, old);
33284 else
33285 tty->termios = old;
33286 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
33287 D1("Pending read interrupt on port %d\n", i);
33288 spin_lock(&serial->serial_lock);
33289 if (serial->rx_state == RX_IDLE &&
33290 - serial->open_count > 0) {
33291 + local_read(&serial->open_count) > 0) {
33292 /* Setup and send a ctrl req read on
33293 * port i */
33294 if (!serial->rx_urb_filled[0]) {
33295 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
33296 /* Start all serial ports */
33297 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
33298 if (serial_table[i] && (serial_table[i]->interface == iface)) {
33299 - if (dev2ser(serial_table[i])->open_count) {
33300 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
33301 result =
33302 hso_start_serial_device(serial_table[i], GFP_NOIO);
33303 hso_kick_transmit(dev2ser(serial_table[i]));
33304 diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33305 index e662cbc..8d4a102 100644
33306 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
33307 +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33308 @@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
33309 * Return with error code if any of the queue indices
33310 * is out of range
33311 */
33312 - if (p->ring_index[i] < 0 ||
33313 - p->ring_index[i] >= adapter->num_rx_queues)
33314 + if (p->ring_index[i] >= adapter->num_rx_queues)
33315 return -EINVAL;
33316 }
33317
33318 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
33319 index 0f9ee46..e2d6e65 100644
33320 --- a/drivers/net/wireless/ath/ath.h
33321 +++ b/drivers/net/wireless/ath/ath.h
33322 @@ -119,6 +119,7 @@ struct ath_ops {
33323 void (*write_flush) (void *);
33324 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
33325 };
33326 +typedef struct ath_ops __no_const ath_ops_no_const;
33327
33328 struct ath_common;
33329 struct ath_bus_ops;
33330 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33331 index b592016..fe47870 100644
33332 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33333 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33334 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33335 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
33336 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
33337
33338 - ACCESS_ONCE(ads->ds_link) = i->link;
33339 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
33340 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
33341 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
33342
33343 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
33344 ctl6 = SM(i->keytype, AR_EncrType);
33345 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33346
33347 if ((i->is_first || i->is_last) &&
33348 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
33349 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
33350 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
33351 | set11nTries(i->rates, 1)
33352 | set11nTries(i->rates, 2)
33353 | set11nTries(i->rates, 3)
33354 | (i->dur_update ? AR_DurUpdateEna : 0)
33355 | SM(0, AR_BurstDur);
33356
33357 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
33358 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
33359 | set11nRate(i->rates, 1)
33360 | set11nRate(i->rates, 2)
33361 | set11nRate(i->rates, 3);
33362 } else {
33363 - ACCESS_ONCE(ads->ds_ctl2) = 0;
33364 - ACCESS_ONCE(ads->ds_ctl3) = 0;
33365 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
33366 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
33367 }
33368
33369 if (!i->is_first) {
33370 - ACCESS_ONCE(ads->ds_ctl0) = 0;
33371 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33372 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33373 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
33374 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33375 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33376 return;
33377 }
33378
33379 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33380 break;
33381 }
33382
33383 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33384 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33385 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33386 | SM(i->txpower, AR_XmitPower)
33387 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33388 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33389 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
33390 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
33391
33392 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33393 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33394 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33395 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33396
33397 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
33398 return;
33399
33400 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33401 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33402 | set11nPktDurRTSCTS(i->rates, 1);
33403
33404 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33405 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33406 | set11nPktDurRTSCTS(i->rates, 3);
33407
33408 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33409 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33410 | set11nRateFlags(i->rates, 1)
33411 | set11nRateFlags(i->rates, 2)
33412 | set11nRateFlags(i->rates, 3)
33413 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33414 index f5ae3c6..7936af3 100644
33415 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33416 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33417 @@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33418 (i->qcu << AR_TxQcuNum_S) | 0x17;
33419
33420 checksum += val;
33421 - ACCESS_ONCE(ads->info) = val;
33422 + ACCESS_ONCE_RW(ads->info) = val;
33423
33424 checksum += i->link;
33425 - ACCESS_ONCE(ads->link) = i->link;
33426 + ACCESS_ONCE_RW(ads->link) = i->link;
33427
33428 checksum += i->buf_addr[0];
33429 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
33430 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
33431 checksum += i->buf_addr[1];
33432 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
33433 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
33434 checksum += i->buf_addr[2];
33435 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
33436 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
33437 checksum += i->buf_addr[3];
33438 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
33439 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
33440
33441 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
33442 - ACCESS_ONCE(ads->ctl3) = val;
33443 + ACCESS_ONCE_RW(ads->ctl3) = val;
33444 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
33445 - ACCESS_ONCE(ads->ctl5) = val;
33446 + ACCESS_ONCE_RW(ads->ctl5) = val;
33447 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
33448 - ACCESS_ONCE(ads->ctl7) = val;
33449 + ACCESS_ONCE_RW(ads->ctl7) = val;
33450 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
33451 - ACCESS_ONCE(ads->ctl9) = val;
33452 + ACCESS_ONCE_RW(ads->ctl9) = val;
33453
33454 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
33455 - ACCESS_ONCE(ads->ctl10) = checksum;
33456 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
33457
33458 if (i->is_first || i->is_last) {
33459 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
33460 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
33461 | set11nTries(i->rates, 1)
33462 | set11nTries(i->rates, 2)
33463 | set11nTries(i->rates, 3)
33464 | (i->dur_update ? AR_DurUpdateEna : 0)
33465 | SM(0, AR_BurstDur);
33466
33467 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
33468 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
33469 | set11nRate(i->rates, 1)
33470 | set11nRate(i->rates, 2)
33471 | set11nRate(i->rates, 3);
33472 } else {
33473 - ACCESS_ONCE(ads->ctl13) = 0;
33474 - ACCESS_ONCE(ads->ctl14) = 0;
33475 + ACCESS_ONCE_RW(ads->ctl13) = 0;
33476 + ACCESS_ONCE_RW(ads->ctl14) = 0;
33477 }
33478
33479 ads->ctl20 = 0;
33480 @@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33481
33482 ctl17 = SM(i->keytype, AR_EncrType);
33483 if (!i->is_first) {
33484 - ACCESS_ONCE(ads->ctl11) = 0;
33485 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33486 - ACCESS_ONCE(ads->ctl15) = 0;
33487 - ACCESS_ONCE(ads->ctl16) = 0;
33488 - ACCESS_ONCE(ads->ctl17) = ctl17;
33489 - ACCESS_ONCE(ads->ctl18) = 0;
33490 - ACCESS_ONCE(ads->ctl19) = 0;
33491 + ACCESS_ONCE_RW(ads->ctl11) = 0;
33492 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33493 + ACCESS_ONCE_RW(ads->ctl15) = 0;
33494 + ACCESS_ONCE_RW(ads->ctl16) = 0;
33495 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33496 + ACCESS_ONCE_RW(ads->ctl18) = 0;
33497 + ACCESS_ONCE_RW(ads->ctl19) = 0;
33498 return;
33499 }
33500
33501 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33502 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33503 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33504 | SM(i->txpower, AR_XmitPower)
33505 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33506 @@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33507 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
33508 ctl12 |= SM(val, AR_PAPRDChainMask);
33509
33510 - ACCESS_ONCE(ads->ctl12) = ctl12;
33511 - ACCESS_ONCE(ads->ctl17) = ctl17;
33512 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
33513 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33514
33515 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33516 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33517 | set11nPktDurRTSCTS(i->rates, 1);
33518
33519 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33520 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33521 | set11nPktDurRTSCTS(i->rates, 3);
33522
33523 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
33524 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
33525 | set11nRateFlags(i->rates, 1)
33526 | set11nRateFlags(i->rates, 2)
33527 | set11nRateFlags(i->rates, 3)
33528 | SM(i->rtscts_rate, AR_RTSCTSRate);
33529
33530 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
33531 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
33532 }
33533
33534 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
33535 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
33536 index f389b3c..7359e18 100644
33537 --- a/drivers/net/wireless/ath/ath9k/hw.h
33538 +++ b/drivers/net/wireless/ath/ath9k/hw.h
33539 @@ -605,7 +605,7 @@ struct ath_hw_private_ops {
33540
33541 /* ANI */
33542 void (*ani_cache_ini_regs)(struct ath_hw *ah);
33543 -};
33544 +} __no_const;
33545
33546 /**
33547 * struct ath_hw_ops - callbacks used by hardware code and driver code
33548 @@ -635,7 +635,7 @@ struct ath_hw_ops {
33549 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
33550 struct ath_hw_antcomb_conf *antconf);
33551
33552 -};
33553 +} __no_const;
33554
33555 struct ath_nf_limits {
33556 s16 max;
33557 @@ -655,7 +655,7 @@ enum ath_cal_list {
33558 #define AH_FASTCC 0x4
33559
33560 struct ath_hw {
33561 - struct ath_ops reg_ops;
33562 + ath_ops_no_const reg_ops;
33563
33564 struct ieee80211_hw *hw;
33565 struct ath_common common;
33566 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33567 index bea8524..c677c06 100644
33568 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33569 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33570 @@ -547,7 +547,7 @@ struct phy_func_ptr {
33571 void (*carrsuppr)(struct brcms_phy *);
33572 s32 (*rxsigpwr)(struct brcms_phy *, s32);
33573 void (*detach)(struct brcms_phy *);
33574 -};
33575 +} __no_const;
33576
33577 struct brcms_phy {
33578 struct brcms_phy_pub pubpi_ro;
33579 diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33580 index 05f2ad1..ae00eea 100644
33581 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
33582 +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33583 @@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
33584 */
33585 if (iwl3945_mod_params.disable_hw_scan) {
33586 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
33587 - iwl3945_hw_ops.hw_scan = NULL;
33588 + pax_open_kernel();
33589 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
33590 + pax_close_kernel();
33591 }
33592
33593 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
33594 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
33595 index 69a77e2..552b42c 100644
33596 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
33597 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
33598 @@ -71,8 +71,8 @@ do { \
33599 } while (0)
33600
33601 #else
33602 -#define IWL_DEBUG(m, level, fmt, args...)
33603 -#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
33604 +#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
33605 +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
33606 #define iwl_print_hex_dump(m, level, p, len)
33607 #endif /* CONFIG_IWLWIFI_DEBUG */
33608
33609 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
33610 index 523ad55..f8c5dc5 100644
33611 --- a/drivers/net/wireless/mac80211_hwsim.c
33612 +++ b/drivers/net/wireless/mac80211_hwsim.c
33613 @@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
33614 return -EINVAL;
33615
33616 if (fake_hw_scan) {
33617 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33618 - mac80211_hwsim_ops.sw_scan_start = NULL;
33619 - mac80211_hwsim_ops.sw_scan_complete = NULL;
33620 + pax_open_kernel();
33621 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33622 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33623 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33624 + pax_close_kernel();
33625 }
33626
33627 spin_lock_init(&hwsim_radio_lock);
33628 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
33629 index 30f138b..c904585 100644
33630 --- a/drivers/net/wireless/mwifiex/main.h
33631 +++ b/drivers/net/wireless/mwifiex/main.h
33632 @@ -543,7 +543,7 @@ struct mwifiex_if_ops {
33633 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33634 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
33635 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
33636 -};
33637 +} __no_const;
33638
33639 struct mwifiex_adapter {
33640 u8 iface_type;
33641 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
33642 index 0c13840..a5c3ed6 100644
33643 --- a/drivers/net/wireless/rndis_wlan.c
33644 +++ b/drivers/net/wireless/rndis_wlan.c
33645 @@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
33646
33647 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33648
33649 - if (rts_threshold < 0 || rts_threshold > 2347)
33650 + if (rts_threshold > 2347)
33651 rts_threshold = 2347;
33652
33653 tmp = cpu_to_le32(rts_threshold);
33654 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
33655 index a77f1bb..c608b2b 100644
33656 --- a/drivers/net/wireless/wl1251/wl1251.h
33657 +++ b/drivers/net/wireless/wl1251/wl1251.h
33658 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
33659 void (*reset)(struct wl1251 *wl);
33660 void (*enable_irq)(struct wl1251 *wl);
33661 void (*disable_irq)(struct wl1251 *wl);
33662 -};
33663 +} __no_const;
33664
33665 struct wl1251 {
33666 struct ieee80211_hw *hw;
33667 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
33668 index f34b5b2..b5abb9f 100644
33669 --- a/drivers/oprofile/buffer_sync.c
33670 +++ b/drivers/oprofile/buffer_sync.c
33671 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
33672 if (cookie == NO_COOKIE)
33673 offset = pc;
33674 if (cookie == INVALID_COOKIE) {
33675 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33676 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33677 offset = pc;
33678 }
33679 if (cookie != last_cookie) {
33680 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
33681 /* add userspace sample */
33682
33683 if (!mm) {
33684 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
33685 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33686 return 0;
33687 }
33688
33689 cookie = lookup_dcookie(mm, s->eip, &offset);
33690
33691 if (cookie == INVALID_COOKIE) {
33692 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33693 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33694 return 0;
33695 }
33696
33697 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33698 /* ignore backtraces if failed to add a sample */
33699 if (state == sb_bt_start) {
33700 state = sb_bt_ignore;
33701 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33702 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33703 }
33704 }
33705 release_mm(mm);
33706 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
33707 index c0cc4e7..44d4e54 100644
33708 --- a/drivers/oprofile/event_buffer.c
33709 +++ b/drivers/oprofile/event_buffer.c
33710 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
33711 }
33712
33713 if (buffer_pos == buffer_size) {
33714 - atomic_inc(&oprofile_stats.event_lost_overflow);
33715 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33716 return;
33717 }
33718
33719 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
33720 index f8c752e..28bf4fc 100644
33721 --- a/drivers/oprofile/oprof.c
33722 +++ b/drivers/oprofile/oprof.c
33723 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
33724 if (oprofile_ops.switch_events())
33725 return;
33726
33727 - atomic_inc(&oprofile_stats.multiplex_counter);
33728 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33729 start_switch_worker();
33730 }
33731
33732 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
33733 index 917d28e..d62d981 100644
33734 --- a/drivers/oprofile/oprofile_stats.c
33735 +++ b/drivers/oprofile/oprofile_stats.c
33736 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33737 cpu_buf->sample_invalid_eip = 0;
33738 }
33739
33740 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33741 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33742 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
33743 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33744 - atomic_set(&oprofile_stats.multiplex_counter, 0);
33745 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33746 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33747 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33748 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33749 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33750 }
33751
33752
33753 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
33754 index 38b6fc0..b5cbfce 100644
33755 --- a/drivers/oprofile/oprofile_stats.h
33756 +++ b/drivers/oprofile/oprofile_stats.h
33757 @@ -13,11 +13,11 @@
33758 #include <linux/atomic.h>
33759
33760 struct oprofile_stat_struct {
33761 - atomic_t sample_lost_no_mm;
33762 - atomic_t sample_lost_no_mapping;
33763 - atomic_t bt_lost_no_mapping;
33764 - atomic_t event_lost_overflow;
33765 - atomic_t multiplex_counter;
33766 + atomic_unchecked_t sample_lost_no_mm;
33767 + atomic_unchecked_t sample_lost_no_mapping;
33768 + atomic_unchecked_t bt_lost_no_mapping;
33769 + atomic_unchecked_t event_lost_overflow;
33770 + atomic_unchecked_t multiplex_counter;
33771 };
33772
33773 extern struct oprofile_stat_struct oprofile_stats;
33774 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
33775 index 2f0aa0f..90fab02 100644
33776 --- a/drivers/oprofile/oprofilefs.c
33777 +++ b/drivers/oprofile/oprofilefs.c
33778 @@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
33779
33780
33781 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33782 - char const *name, atomic_t *val)
33783 + char const *name, atomic_unchecked_t *val)
33784 {
33785 return __oprofilefs_create_file(sb, root, name,
33786 &atomic_ro_fops, 0444, val);
33787 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
33788 index 3f56bc0..707d642 100644
33789 --- a/drivers/parport/procfs.c
33790 +++ b/drivers/parport/procfs.c
33791 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
33792
33793 *ppos += len;
33794
33795 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33796 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33797 }
33798
33799 #ifdef CONFIG_PARPORT_1284
33800 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
33801
33802 *ppos += len;
33803
33804 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33805 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33806 }
33807 #endif /* IEEE1284.3 support. */
33808
33809 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
33810 index 9fff878..ad0ad53 100644
33811 --- a/drivers/pci/hotplug/cpci_hotplug.h
33812 +++ b/drivers/pci/hotplug/cpci_hotplug.h
33813 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33814 int (*hardware_test) (struct slot* slot, u32 value);
33815 u8 (*get_power) (struct slot* slot);
33816 int (*set_power) (struct slot* slot, int value);
33817 -};
33818 +} __no_const;
33819
33820 struct cpci_hp_controller {
33821 unsigned int irq;
33822 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
33823 index 76ba8a1..20ca857 100644
33824 --- a/drivers/pci/hotplug/cpqphp_nvram.c
33825 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
33826 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
33827
33828 void compaq_nvram_init (void __iomem *rom_start)
33829 {
33830 +
33831 +#ifndef CONFIG_PAX_KERNEXEC
33832 if (rom_start) {
33833 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33834 }
33835 +#endif
33836 +
33837 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33838
33839 /* initialize our int15 lock */
33840 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
33841 index 1cfbf22..be96487 100644
33842 --- a/drivers/pci/pcie/aspm.c
33843 +++ b/drivers/pci/pcie/aspm.c
33844 @@ -27,9 +27,9 @@
33845 #define MODULE_PARAM_PREFIX "pcie_aspm."
33846
33847 /* Note: those are not register definitions */
33848 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33849 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33850 -#define ASPM_STATE_L1 (4) /* L1 state */
33851 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33852 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33853 +#define ASPM_STATE_L1 (4U) /* L1 state */
33854 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33855 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33856
33857 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
33858 index dfee1b3..a454fb6 100644
33859 --- a/drivers/pci/probe.c
33860 +++ b/drivers/pci/probe.c
33861 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
33862 u32 l, sz, mask;
33863 u16 orig_cmd;
33864
33865 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33866 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33867
33868 if (!dev->mmio_always_on) {
33869 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33870 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
33871 index 27911b5..5b6db88 100644
33872 --- a/drivers/pci/proc.c
33873 +++ b/drivers/pci/proc.c
33874 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
33875 static int __init pci_proc_init(void)
33876 {
33877 struct pci_dev *dev = NULL;
33878 +
33879 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33880 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33881 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33882 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33883 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33884 +#endif
33885 +#else
33886 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33887 +#endif
33888 proc_create("devices", 0, proc_bus_pci_dir,
33889 &proc_bus_pci_dev_operations);
33890 proc_initialized = 1;
33891 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
33892 index 7b82868..b9344c9 100644
33893 --- a/drivers/platform/x86/thinkpad_acpi.c
33894 +++ b/drivers/platform/x86/thinkpad_acpi.c
33895 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33896 return 0;
33897 }
33898
33899 -void static hotkey_mask_warn_incomplete_mask(void)
33900 +static void hotkey_mask_warn_incomplete_mask(void)
33901 {
33902 /* log only what the user can fix... */
33903 const u32 wantedmask = hotkey_driver_mask &
33904 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
33905 }
33906 }
33907
33908 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33909 - struct tp_nvram_state *newn,
33910 - const u32 event_mask)
33911 -{
33912 -
33913 #define TPACPI_COMPARE_KEY(__scancode, __member) \
33914 do { \
33915 if ((event_mask & (1 << __scancode)) && \
33916 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33917 tpacpi_hotkey_send_key(__scancode); \
33918 } while (0)
33919
33920 - void issue_volchange(const unsigned int oldvol,
33921 - const unsigned int newvol)
33922 - {
33923 - unsigned int i = oldvol;
33924 +static void issue_volchange(const unsigned int oldvol,
33925 + const unsigned int newvol,
33926 + const u32 event_mask)
33927 +{
33928 + unsigned int i = oldvol;
33929
33930 - while (i > newvol) {
33931 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33932 - i--;
33933 - }
33934 - while (i < newvol) {
33935 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33936 - i++;
33937 - }
33938 + while (i > newvol) {
33939 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33940 + i--;
33941 }
33942 + while (i < newvol) {
33943 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33944 + i++;
33945 + }
33946 +}
33947
33948 - void issue_brightnesschange(const unsigned int oldbrt,
33949 - const unsigned int newbrt)
33950 - {
33951 - unsigned int i = oldbrt;
33952 +static void issue_brightnesschange(const unsigned int oldbrt,
33953 + const unsigned int newbrt,
33954 + const u32 event_mask)
33955 +{
33956 + unsigned int i = oldbrt;
33957
33958 - while (i > newbrt) {
33959 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33960 - i--;
33961 - }
33962 - while (i < newbrt) {
33963 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33964 - i++;
33965 - }
33966 + while (i > newbrt) {
33967 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33968 + i--;
33969 + }
33970 + while (i < newbrt) {
33971 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33972 + i++;
33973 }
33974 +}
33975
33976 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33977 + struct tp_nvram_state *newn,
33978 + const u32 event_mask)
33979 +{
33980 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
33981 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
33982 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
33983 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33984 oldn->volume_level != newn->volume_level) {
33985 /* recently muted, or repeated mute keypress, or
33986 * multiple presses ending in mute */
33987 - issue_volchange(oldn->volume_level, newn->volume_level);
33988 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33989 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
33990 }
33991 } else {
33992 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33993 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33994 }
33995 if (oldn->volume_level != newn->volume_level) {
33996 - issue_volchange(oldn->volume_level, newn->volume_level);
33997 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33998 } else if (oldn->volume_toggle != newn->volume_toggle) {
33999 /* repeated vol up/down keypress at end of scale ? */
34000 if (newn->volume_level == 0)
34001 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
34002 /* handle brightness */
34003 if (oldn->brightness_level != newn->brightness_level) {
34004 issue_brightnesschange(oldn->brightness_level,
34005 - newn->brightness_level);
34006 + newn->brightness_level,
34007 + event_mask);
34008 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
34009 /* repeated key presses that didn't change state */
34010 if (newn->brightness_level == 0)
34011 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
34012 && !tp_features.bright_unkfw)
34013 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
34014 }
34015 +}
34016
34017 #undef TPACPI_COMPARE_KEY
34018 #undef TPACPI_MAY_SEND_KEY
34019 -}
34020
34021 /*
34022 * Polling driver
34023 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
34024 index b859d16..5cc6b1a 100644
34025 --- a/drivers/pnp/pnpbios/bioscalls.c
34026 +++ b/drivers/pnp/pnpbios/bioscalls.c
34027 @@ -59,7 +59,7 @@ do { \
34028 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
34029 } while(0)
34030
34031 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
34032 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
34033 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
34034
34035 /*
34036 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
34037
34038 cpu = get_cpu();
34039 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
34040 +
34041 + pax_open_kernel();
34042 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
34043 + pax_close_kernel();
34044
34045 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
34046 spin_lock_irqsave(&pnp_bios_lock, flags);
34047 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
34048 :"memory");
34049 spin_unlock_irqrestore(&pnp_bios_lock, flags);
34050
34051 + pax_open_kernel();
34052 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
34053 + pax_close_kernel();
34054 +
34055 put_cpu();
34056
34057 /* If we get here and this is set then the PnP BIOS faulted on us. */
34058 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
34059 return status;
34060 }
34061
34062 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
34063 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
34064 {
34065 int i;
34066
34067 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
34068 pnp_bios_callpoint.offset = header->fields.pm16offset;
34069 pnp_bios_callpoint.segment = PNP_CS16;
34070
34071 + pax_open_kernel();
34072 +
34073 for_each_possible_cpu(i) {
34074 struct desc_struct *gdt = get_cpu_gdt_table(i);
34075 if (!gdt)
34076 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
34077 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
34078 (unsigned long)__va(header->fields.pm16dseg));
34079 }
34080 +
34081 + pax_close_kernel();
34082 }
34083 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
34084 index b0ecacb..7c9da2e 100644
34085 --- a/drivers/pnp/resource.c
34086 +++ b/drivers/pnp/resource.c
34087 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
34088 return 1;
34089
34090 /* check if the resource is valid */
34091 - if (*irq < 0 || *irq > 15)
34092 + if (*irq > 15)
34093 return 0;
34094
34095 /* check if the resource is reserved */
34096 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
34097 return 1;
34098
34099 /* check if the resource is valid */
34100 - if (*dma < 0 || *dma == 4 || *dma > 7)
34101 + if (*dma == 4 || *dma > 7)
34102 return 0;
34103
34104 /* check if the resource is reserved */
34105 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
34106 index bb16f5b..c751eef 100644
34107 --- a/drivers/power/bq27x00_battery.c
34108 +++ b/drivers/power/bq27x00_battery.c
34109 @@ -67,7 +67,7 @@
34110 struct bq27x00_device_info;
34111 struct bq27x00_access_methods {
34112 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
34113 -};
34114 +} __no_const;
34115
34116 enum bq27x00_chip { BQ27000, BQ27500 };
34117
34118 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
34119 index 33f5d9a..d957d3f 100644
34120 --- a/drivers/regulator/max8660.c
34121 +++ b/drivers/regulator/max8660.c
34122 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
34123 max8660->shadow_regs[MAX8660_OVER1] = 5;
34124 } else {
34125 /* Otherwise devices can be toggled via software */
34126 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
34127 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
34128 + pax_open_kernel();
34129 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
34130 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
34131 + pax_close_kernel();
34132 }
34133
34134 /*
34135 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
34136 index 023d17d..74ef35b 100644
34137 --- a/drivers/regulator/mc13892-regulator.c
34138 +++ b/drivers/regulator/mc13892-regulator.c
34139 @@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
34140 }
34141 mc13xxx_unlock(mc13892);
34142
34143 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
34144 + pax_open_kernel();
34145 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
34146 = mc13892_vcam_set_mode;
34147 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
34148 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
34149 = mc13892_vcam_get_mode;
34150 + pax_close_kernel();
34151 for (i = 0; i < pdata->num_regulators; i++) {
34152 init_data = &pdata->regulators[i];
34153 priv->regulators[i] = regulator_register(
34154 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
34155 index cace6d3..f623fda 100644
34156 --- a/drivers/rtc/rtc-dev.c
34157 +++ b/drivers/rtc/rtc-dev.c
34158 @@ -14,6 +14,7 @@
34159 #include <linux/module.h>
34160 #include <linux/rtc.h>
34161 #include <linux/sched.h>
34162 +#include <linux/grsecurity.h>
34163 #include "rtc-core.h"
34164
34165 static dev_t rtc_devt;
34166 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
34167 if (copy_from_user(&tm, uarg, sizeof(tm)))
34168 return -EFAULT;
34169
34170 + gr_log_timechange();
34171 +
34172 return rtc_set_time(rtc, &tm);
34173
34174 case RTC_PIE_ON:
34175 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
34176 index ffb5878..e6d785c 100644
34177 --- a/drivers/scsi/aacraid/aacraid.h
34178 +++ b/drivers/scsi/aacraid/aacraid.h
34179 @@ -492,7 +492,7 @@ struct adapter_ops
34180 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
34181 /* Administrative operations */
34182 int (*adapter_comm)(struct aac_dev * dev, int comm);
34183 -};
34184 +} __no_const;
34185
34186 /*
34187 * Define which interrupt handler needs to be installed
34188 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
34189 index 705e13e..91c873c 100644
34190 --- a/drivers/scsi/aacraid/linit.c
34191 +++ b/drivers/scsi/aacraid/linit.c
34192 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
34193 #elif defined(__devinitconst)
34194 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
34195 #else
34196 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
34197 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
34198 #endif
34199 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
34200 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
34201 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
34202 index d5ff142..49c0ebb 100644
34203 --- a/drivers/scsi/aic94xx/aic94xx_init.c
34204 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
34205 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
34206 .lldd_control_phy = asd_control_phy,
34207 };
34208
34209 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
34210 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
34211 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
34212 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
34213 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
34214 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
34215 index a796de9..1ef20e1 100644
34216 --- a/drivers/scsi/bfa/bfa.h
34217 +++ b/drivers/scsi/bfa/bfa.h
34218 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
34219 u32 *end);
34220 int cpe_vec_q0;
34221 int rme_vec_q0;
34222 -};
34223 +} __no_const;
34224 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
34225
34226 struct bfa_faa_cbfn_s {
34227 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
34228 index e07bd47..cd1bbbb 100644
34229 --- a/drivers/scsi/bfa/bfa_fcpim.c
34230 +++ b/drivers/scsi/bfa/bfa_fcpim.c
34231 @@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
34232
34233 bfa_iotag_attach(fcp);
34234
34235 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
34236 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
34237 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
34238 (fcp->num_itns * sizeof(struct bfa_itn_s));
34239 memset(fcp->itn_arr, 0,
34240 @@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34241 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
34242 {
34243 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
34244 - struct bfa_itn_s *itn;
34245 + bfa_itn_s_no_const *itn;
34246
34247 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
34248 itn->isr = isr;
34249 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
34250 index 1080bcb..a3b39e3 100644
34251 --- a/drivers/scsi/bfa/bfa_fcpim.h
34252 +++ b/drivers/scsi/bfa/bfa_fcpim.h
34253 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
34254 struct bfa_itn_s {
34255 bfa_isr_func_t isr;
34256 };
34257 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
34258
34259 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34260 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
34261 @@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
34262 struct list_head iotag_tio_free_q; /* free IO resources */
34263 struct list_head iotag_unused_q; /* unused IO resources*/
34264 struct bfa_iotag_s *iotag_arr;
34265 - struct bfa_itn_s *itn_arr;
34266 + bfa_itn_s_no_const *itn_arr;
34267 int num_ioim_reqs;
34268 int num_fwtio_reqs;
34269 int num_itns;
34270 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
34271 index 546d46b..642fa5b 100644
34272 --- a/drivers/scsi/bfa/bfa_ioc.h
34273 +++ b/drivers/scsi/bfa/bfa_ioc.h
34274 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
34275 bfa_ioc_disable_cbfn_t disable_cbfn;
34276 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
34277 bfa_ioc_reset_cbfn_t reset_cbfn;
34278 -};
34279 +} __no_const;
34280
34281 /*
34282 * IOC event notification mechanism.
34283 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
34284 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
34285 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
34286 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
34287 -};
34288 +} __no_const;
34289
34290 /*
34291 * Queue element to wait for room in request queue. FIFO order is
34292 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
34293 index 351dc0b..951dc32 100644
34294 --- a/drivers/scsi/hosts.c
34295 +++ b/drivers/scsi/hosts.c
34296 @@ -42,7 +42,7 @@
34297 #include "scsi_logging.h"
34298
34299
34300 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
34301 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34302
34303
34304 static void scsi_host_cls_release(struct device *dev)
34305 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
34306 * subtract one because we increment first then return, but we need to
34307 * know what the next host number was before increment
34308 */
34309 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34310 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34311 shost->dma_channel = 0xff;
34312
34313 /* These three are default values which can be overridden */
34314 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
34315 index 865d452..e9b7fa7 100644
34316 --- a/drivers/scsi/hpsa.c
34317 +++ b/drivers/scsi/hpsa.c
34318 @@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
34319 u32 a;
34320
34321 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
34322 - return h->access.command_completed(h);
34323 + return h->access->command_completed(h);
34324
34325 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
34326 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
34327 @@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h)
34328 while (!list_empty(&h->reqQ)) {
34329 c = list_entry(h->reqQ.next, struct CommandList, list);
34330 /* can't do anything if fifo is full */
34331 - if ((h->access.fifo_full(h))) {
34332 + if ((h->access->fifo_full(h))) {
34333 dev_warn(&h->pdev->dev, "fifo full\n");
34334 break;
34335 }
34336 @@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h)
34337 h->Qdepth--;
34338
34339 /* Tell the controller execute command */
34340 - h->access.submit_command(h, c);
34341 + h->access->submit_command(h, c);
34342
34343 /* Put job onto the completed Q */
34344 addQ(&h->cmpQ, c);
34345 @@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h)
34346
34347 static inline unsigned long get_next_completion(struct ctlr_info *h)
34348 {
34349 - return h->access.command_completed(h);
34350 + return h->access->command_completed(h);
34351 }
34352
34353 static inline bool interrupt_pending(struct ctlr_info *h)
34354 {
34355 - return h->access.intr_pending(h);
34356 + return h->access->intr_pending(h);
34357 }
34358
34359 static inline long interrupt_not_for_us(struct ctlr_info *h)
34360 {
34361 - return (h->access.intr_pending(h) == 0) ||
34362 + return (h->access->intr_pending(h) == 0) ||
34363 (h->interrupts_enabled == 0);
34364 }
34365
34366 @@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
34367 if (prod_index < 0)
34368 return -ENODEV;
34369 h->product_name = products[prod_index].product_name;
34370 - h->access = *(products[prod_index].access);
34371 + h->access = products[prod_index].access;
34372
34373 if (hpsa_board_disabled(h->pdev)) {
34374 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
34375 @@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
34376
34377 assert_spin_locked(&lockup_detector_lock);
34378 remove_ctlr_from_lockup_detector_list(h);
34379 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34380 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34381 spin_lock_irqsave(&h->lock, flags);
34382 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
34383 spin_unlock_irqrestore(&h->lock, flags);
34384 @@ -4340,7 +4340,7 @@ reinit_after_soft_reset:
34385 }
34386
34387 /* make sure the board interrupts are off */
34388 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34389 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34390
34391 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
34392 goto clean2;
34393 @@ -4374,7 +4374,7 @@ reinit_after_soft_reset:
34394 * fake ones to scoop up any residual completions.
34395 */
34396 spin_lock_irqsave(&h->lock, flags);
34397 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34398 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34399 spin_unlock_irqrestore(&h->lock, flags);
34400 free_irq(h->intr[h->intr_mode], h);
34401 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
34402 @@ -4393,9 +4393,9 @@ reinit_after_soft_reset:
34403 dev_info(&h->pdev->dev, "Board READY.\n");
34404 dev_info(&h->pdev->dev,
34405 "Waiting for stale completions to drain.\n");
34406 - h->access.set_intr_mask(h, HPSA_INTR_ON);
34407 + h->access->set_intr_mask(h, HPSA_INTR_ON);
34408 msleep(10000);
34409 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34410 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34411
34412 rc = controller_reset_failed(h->cfgtable);
34413 if (rc)
34414 @@ -4416,7 +4416,7 @@ reinit_after_soft_reset:
34415 }
34416
34417 /* Turn the interrupts on so we can service requests */
34418 - h->access.set_intr_mask(h, HPSA_INTR_ON);
34419 + h->access->set_intr_mask(h, HPSA_INTR_ON);
34420
34421 hpsa_hba_inquiry(h);
34422 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
34423 @@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
34424 * To write all data in the battery backed cache to disks
34425 */
34426 hpsa_flush_cache(h);
34427 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34428 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34429 free_irq(h->intr[h->intr_mode], h);
34430 #ifdef CONFIG_PCI_MSI
34431 if (h->msix_vector)
34432 @@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
34433 return;
34434 }
34435 /* Change the access methods to the performant access methods */
34436 - h->access = SA5_performant_access;
34437 + h->access = &SA5_performant_access;
34438 h->transMethod = CFGTBL_Trans_Performant;
34439 }
34440
34441 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
34442 index 91edafb..a9b88ec 100644
34443 --- a/drivers/scsi/hpsa.h
34444 +++ b/drivers/scsi/hpsa.h
34445 @@ -73,7 +73,7 @@ struct ctlr_info {
34446 unsigned int msix_vector;
34447 unsigned int msi_vector;
34448 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
34449 - struct access_method access;
34450 + struct access_method *access;
34451
34452 /* queue and queue Info */
34453 struct list_head reqQ;
34454 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
34455 index f2df059..a3a9930 100644
34456 --- a/drivers/scsi/ips.h
34457 +++ b/drivers/scsi/ips.h
34458 @@ -1027,7 +1027,7 @@ typedef struct {
34459 int (*intr)(struct ips_ha *);
34460 void (*enableint)(struct ips_ha *);
34461 uint32_t (*statupd)(struct ips_ha *);
34462 -} ips_hw_func_t;
34463 +} __no_const ips_hw_func_t;
34464
34465 typedef struct ips_ha {
34466 uint8_t ha_id[IPS_MAX_CHANNELS+1];
34467 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
34468 index 9de9db2..1e09660 100644
34469 --- a/drivers/scsi/libfc/fc_exch.c
34470 +++ b/drivers/scsi/libfc/fc_exch.c
34471 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
34472 * all together if not used XXX
34473 */
34474 struct {
34475 - atomic_t no_free_exch;
34476 - atomic_t no_free_exch_xid;
34477 - atomic_t xid_not_found;
34478 - atomic_t xid_busy;
34479 - atomic_t seq_not_found;
34480 - atomic_t non_bls_resp;
34481 + atomic_unchecked_t no_free_exch;
34482 + atomic_unchecked_t no_free_exch_xid;
34483 + atomic_unchecked_t xid_not_found;
34484 + atomic_unchecked_t xid_busy;
34485 + atomic_unchecked_t seq_not_found;
34486 + atomic_unchecked_t non_bls_resp;
34487 } stats;
34488 };
34489
34490 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
34491 /* allocate memory for exchange */
34492 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34493 if (!ep) {
34494 - atomic_inc(&mp->stats.no_free_exch);
34495 + atomic_inc_unchecked(&mp->stats.no_free_exch);
34496 goto out;
34497 }
34498 memset(ep, 0, sizeof(*ep));
34499 @@ -780,7 +780,7 @@ out:
34500 return ep;
34501 err:
34502 spin_unlock_bh(&pool->lock);
34503 - atomic_inc(&mp->stats.no_free_exch_xid);
34504 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34505 mempool_free(ep, mp->ep_pool);
34506 return NULL;
34507 }
34508 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34509 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34510 ep = fc_exch_find(mp, xid);
34511 if (!ep) {
34512 - atomic_inc(&mp->stats.xid_not_found);
34513 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34514 reject = FC_RJT_OX_ID;
34515 goto out;
34516 }
34517 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34518 ep = fc_exch_find(mp, xid);
34519 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34520 if (ep) {
34521 - atomic_inc(&mp->stats.xid_busy);
34522 + atomic_inc_unchecked(&mp->stats.xid_busy);
34523 reject = FC_RJT_RX_ID;
34524 goto rel;
34525 }
34526 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34527 }
34528 xid = ep->xid; /* get our XID */
34529 } else if (!ep) {
34530 - atomic_inc(&mp->stats.xid_not_found);
34531 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34532 reject = FC_RJT_RX_ID; /* XID not found */
34533 goto out;
34534 }
34535 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34536 } else {
34537 sp = &ep->seq;
34538 if (sp->id != fh->fh_seq_id) {
34539 - atomic_inc(&mp->stats.seq_not_found);
34540 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34541 if (f_ctl & FC_FC_END_SEQ) {
34542 /*
34543 * Update sequence_id based on incoming last
34544 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34545
34546 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34547 if (!ep) {
34548 - atomic_inc(&mp->stats.xid_not_found);
34549 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34550 goto out;
34551 }
34552 if (ep->esb_stat & ESB_ST_COMPLETE) {
34553 - atomic_inc(&mp->stats.xid_not_found);
34554 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34555 goto rel;
34556 }
34557 if (ep->rxid == FC_XID_UNKNOWN)
34558 ep->rxid = ntohs(fh->fh_rx_id);
34559 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34560 - atomic_inc(&mp->stats.xid_not_found);
34561 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34562 goto rel;
34563 }
34564 if (ep->did != ntoh24(fh->fh_s_id) &&
34565 ep->did != FC_FID_FLOGI) {
34566 - atomic_inc(&mp->stats.xid_not_found);
34567 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34568 goto rel;
34569 }
34570 sof = fr_sof(fp);
34571 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34572 sp->ssb_stat |= SSB_ST_RESP;
34573 sp->id = fh->fh_seq_id;
34574 } else if (sp->id != fh->fh_seq_id) {
34575 - atomic_inc(&mp->stats.seq_not_found);
34576 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34577 goto rel;
34578 }
34579
34580 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34581 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34582
34583 if (!sp)
34584 - atomic_inc(&mp->stats.xid_not_found);
34585 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34586 else
34587 - atomic_inc(&mp->stats.non_bls_resp);
34588 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
34589
34590 fc_frame_free(fp);
34591 }
34592 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
34593 index db9238f..4378ed2 100644
34594 --- a/drivers/scsi/libsas/sas_ata.c
34595 +++ b/drivers/scsi/libsas/sas_ata.c
34596 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
34597 .postreset = ata_std_postreset,
34598 .error_handler = ata_std_error_handler,
34599 .post_internal_cmd = sas_ata_post_internal,
34600 - .qc_defer = ata_std_qc_defer,
34601 + .qc_defer = ata_std_qc_defer,
34602 .qc_prep = ata_noop_qc_prep,
34603 .qc_issue = sas_ata_qc_issue,
34604 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34605 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
34606 index bb4c8e0..f33d849 100644
34607 --- a/drivers/scsi/lpfc/lpfc.h
34608 +++ b/drivers/scsi/lpfc/lpfc.h
34609 @@ -425,7 +425,7 @@ struct lpfc_vport {
34610 struct dentry *debug_nodelist;
34611 struct dentry *vport_debugfs_root;
34612 struct lpfc_debugfs_trc *disc_trc;
34613 - atomic_t disc_trc_cnt;
34614 + atomic_unchecked_t disc_trc_cnt;
34615 #endif
34616 uint8_t stat_data_enabled;
34617 uint8_t stat_data_blocked;
34618 @@ -835,8 +835,8 @@ struct lpfc_hba {
34619 struct timer_list fabric_block_timer;
34620 unsigned long bit_flags;
34621 #define FABRIC_COMANDS_BLOCKED 0
34622 - atomic_t num_rsrc_err;
34623 - atomic_t num_cmd_success;
34624 + atomic_unchecked_t num_rsrc_err;
34625 + atomic_unchecked_t num_cmd_success;
34626 unsigned long last_rsrc_error_time;
34627 unsigned long last_ramp_down_time;
34628 unsigned long last_ramp_up_time;
34629 @@ -866,7 +866,7 @@ struct lpfc_hba {
34630
34631 struct dentry *debug_slow_ring_trc;
34632 struct lpfc_debugfs_trc *slow_ring_trc;
34633 - atomic_t slow_ring_trc_cnt;
34634 + atomic_unchecked_t slow_ring_trc_cnt;
34635 /* iDiag debugfs sub-directory */
34636 struct dentry *idiag_root;
34637 struct dentry *idiag_pci_cfg;
34638 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
34639 index 2838259..a07cfb5 100644
34640 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
34641 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
34642 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
34643
34644 #include <linux/debugfs.h>
34645
34646 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34647 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34648 static unsigned long lpfc_debugfs_start_time = 0L;
34649
34650 /* iDiag */
34651 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
34652 lpfc_debugfs_enable = 0;
34653
34654 len = 0;
34655 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34656 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34657 (lpfc_debugfs_max_disc_trc - 1);
34658 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34659 dtp = vport->disc_trc + i;
34660 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
34661 lpfc_debugfs_enable = 0;
34662
34663 len = 0;
34664 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34665 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34666 (lpfc_debugfs_max_slow_ring_trc - 1);
34667 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34668 dtp = phba->slow_ring_trc + i;
34669 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
34670 !vport || !vport->disc_trc)
34671 return;
34672
34673 - index = atomic_inc_return(&vport->disc_trc_cnt) &
34674 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34675 (lpfc_debugfs_max_disc_trc - 1);
34676 dtp = vport->disc_trc + index;
34677 dtp->fmt = fmt;
34678 dtp->data1 = data1;
34679 dtp->data2 = data2;
34680 dtp->data3 = data3;
34681 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34682 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34683 dtp->jif = jiffies;
34684 #endif
34685 return;
34686 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
34687 !phba || !phba->slow_ring_trc)
34688 return;
34689
34690 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34691 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34692 (lpfc_debugfs_max_slow_ring_trc - 1);
34693 dtp = phba->slow_ring_trc + index;
34694 dtp->fmt = fmt;
34695 dtp->data1 = data1;
34696 dtp->data2 = data2;
34697 dtp->data3 = data3;
34698 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34699 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34700 dtp->jif = jiffies;
34701 #endif
34702 return;
34703 @@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34704 "slow_ring buffer\n");
34705 goto debug_failed;
34706 }
34707 - atomic_set(&phba->slow_ring_trc_cnt, 0);
34708 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34709 memset(phba->slow_ring_trc, 0,
34710 (sizeof(struct lpfc_debugfs_trc) *
34711 lpfc_debugfs_max_slow_ring_trc));
34712 @@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34713 "buffer\n");
34714 goto debug_failed;
34715 }
34716 - atomic_set(&vport->disc_trc_cnt, 0);
34717 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34718
34719 snprintf(name, sizeof(name), "discovery_trace");
34720 vport->debug_disc_trc =
34721 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
34722 index 55bc4fc..a2a109c 100644
34723 --- a/drivers/scsi/lpfc/lpfc_init.c
34724 +++ b/drivers/scsi/lpfc/lpfc_init.c
34725 @@ -10027,8 +10027,10 @@ lpfc_init(void)
34726 printk(LPFC_COPYRIGHT "\n");
34727
34728 if (lpfc_enable_npiv) {
34729 - lpfc_transport_functions.vport_create = lpfc_vport_create;
34730 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34731 + pax_open_kernel();
34732 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34733 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34734 + pax_close_kernel();
34735 }
34736 lpfc_transport_template =
34737 fc_attach_transport(&lpfc_transport_functions);
34738 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
34739 index 2e1e54e..1af0a0d 100644
34740 --- a/drivers/scsi/lpfc/lpfc_scsi.c
34741 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
34742 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
34743 uint32_t evt_posted;
34744
34745 spin_lock_irqsave(&phba->hbalock, flags);
34746 - atomic_inc(&phba->num_rsrc_err);
34747 + atomic_inc_unchecked(&phba->num_rsrc_err);
34748 phba->last_rsrc_error_time = jiffies;
34749
34750 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34751 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
34752 unsigned long flags;
34753 struct lpfc_hba *phba = vport->phba;
34754 uint32_t evt_posted;
34755 - atomic_inc(&phba->num_cmd_success);
34756 + atomic_inc_unchecked(&phba->num_cmd_success);
34757
34758 if (vport->cfg_lun_queue_depth <= queue_depth)
34759 return;
34760 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34761 unsigned long num_rsrc_err, num_cmd_success;
34762 int i;
34763
34764 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34765 - num_cmd_success = atomic_read(&phba->num_cmd_success);
34766 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34767 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34768
34769 vports = lpfc_create_vport_work_array(phba);
34770 if (vports != NULL)
34771 @@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34772 }
34773 }
34774 lpfc_destroy_vport_work_array(phba, vports);
34775 - atomic_set(&phba->num_rsrc_err, 0);
34776 - atomic_set(&phba->num_cmd_success, 0);
34777 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34778 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34779 }
34780
34781 /**
34782 @@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
34783 }
34784 }
34785 lpfc_destroy_vport_work_array(phba, vports);
34786 - atomic_set(&phba->num_rsrc_err, 0);
34787 - atomic_set(&phba->num_cmd_success, 0);
34788 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34789 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34790 }
34791
34792 /**
34793 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
34794 index 5163edb..7b142bc 100644
34795 --- a/drivers/scsi/pmcraid.c
34796 +++ b/drivers/scsi/pmcraid.c
34797 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
34798 res->scsi_dev = scsi_dev;
34799 scsi_dev->hostdata = res;
34800 res->change_detected = 0;
34801 - atomic_set(&res->read_failures, 0);
34802 - atomic_set(&res->write_failures, 0);
34803 + atomic_set_unchecked(&res->read_failures, 0);
34804 + atomic_set_unchecked(&res->write_failures, 0);
34805 rc = 0;
34806 }
34807 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34808 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
34809
34810 /* If this was a SCSI read/write command keep count of errors */
34811 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34812 - atomic_inc(&res->read_failures);
34813 + atomic_inc_unchecked(&res->read_failures);
34814 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34815 - atomic_inc(&res->write_failures);
34816 + atomic_inc_unchecked(&res->write_failures);
34817
34818 if (!RES_IS_GSCSI(res->cfg_entry) &&
34819 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34820 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
34821 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34822 * hrrq_id assigned here in queuecommand
34823 */
34824 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34825 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34826 pinstance->num_hrrq;
34827 cmd->cmd_done = pmcraid_io_done;
34828
34829 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
34830 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34831 * hrrq_id assigned here in queuecommand
34832 */
34833 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34834 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34835 pinstance->num_hrrq;
34836
34837 if (request_size) {
34838 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
34839
34840 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34841 /* add resources only after host is added into system */
34842 - if (!atomic_read(&pinstance->expose_resources))
34843 + if (!atomic_read_unchecked(&pinstance->expose_resources))
34844 return;
34845
34846 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34847 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
34848 init_waitqueue_head(&pinstance->reset_wait_q);
34849
34850 atomic_set(&pinstance->outstanding_cmds, 0);
34851 - atomic_set(&pinstance->last_message_id, 0);
34852 - atomic_set(&pinstance->expose_resources, 0);
34853 + atomic_set_unchecked(&pinstance->last_message_id, 0);
34854 + atomic_set_unchecked(&pinstance->expose_resources, 0);
34855
34856 INIT_LIST_HEAD(&pinstance->free_res_q);
34857 INIT_LIST_HEAD(&pinstance->used_res_q);
34858 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
34859 /* Schedule worker thread to handle CCN and take care of adding and
34860 * removing devices to OS
34861 */
34862 - atomic_set(&pinstance->expose_resources, 1);
34863 + atomic_set_unchecked(&pinstance->expose_resources, 1);
34864 schedule_work(&pinstance->worker_q);
34865 return rc;
34866
34867 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
34868 index ca496c7..9c791d5 100644
34869 --- a/drivers/scsi/pmcraid.h
34870 +++ b/drivers/scsi/pmcraid.h
34871 @@ -748,7 +748,7 @@ struct pmcraid_instance {
34872 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34873
34874 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34875 - atomic_t last_message_id;
34876 + atomic_unchecked_t last_message_id;
34877
34878 /* configuration table */
34879 struct pmcraid_config_table *cfg_table;
34880 @@ -777,7 +777,7 @@ struct pmcraid_instance {
34881 atomic_t outstanding_cmds;
34882
34883 /* should add/delete resources to mid-layer now ?*/
34884 - atomic_t expose_resources;
34885 + atomic_unchecked_t expose_resources;
34886
34887
34888
34889 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
34890 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34891 };
34892 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34893 - atomic_t read_failures; /* count of failed READ commands */
34894 - atomic_t write_failures; /* count of failed WRITE commands */
34895 + atomic_unchecked_t read_failures; /* count of failed READ commands */
34896 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34897
34898 /* To indicate add/delete/modify during CCN */
34899 u8 change_detected;
34900 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
34901 index fcf052c..a8025a4 100644
34902 --- a/drivers/scsi/qla2xxx/qla_def.h
34903 +++ b/drivers/scsi/qla2xxx/qla_def.h
34904 @@ -2244,7 +2244,7 @@ struct isp_operations {
34905 int (*get_flash_version) (struct scsi_qla_host *, void *);
34906 int (*start_scsi) (srb_t *);
34907 int (*abort_isp) (struct scsi_qla_host *);
34908 -};
34909 +} __no_const;
34910
34911 /* MSI-X Support *************************************************************/
34912
34913 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
34914 index fd5edc6..4906148 100644
34915 --- a/drivers/scsi/qla4xxx/ql4_def.h
34916 +++ b/drivers/scsi/qla4xxx/ql4_def.h
34917 @@ -258,7 +258,7 @@ struct ddb_entry {
34918 * (4000 only) */
34919 atomic_t relogin_timer; /* Max Time to wait for
34920 * relogin to complete */
34921 - atomic_t relogin_retry_count; /* Num of times relogin has been
34922 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34923 * retried */
34924 uint32_t default_time2wait; /* Default Min time between
34925 * relogins (+aens) */
34926 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
34927 index 4169c8b..a8b896b 100644
34928 --- a/drivers/scsi/qla4xxx/ql4_os.c
34929 +++ b/drivers/scsi/qla4xxx/ql4_os.c
34930 @@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
34931 */
34932 if (!iscsi_is_session_online(cls_sess)) {
34933 /* Reset retry relogin timer */
34934 - atomic_inc(&ddb_entry->relogin_retry_count);
34935 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34936 DEBUG2(ql4_printk(KERN_INFO, ha,
34937 "%s: index[%d] relogin timed out-retrying"
34938 " relogin (%d), retry (%d)\n", __func__,
34939 ddb_entry->fw_ddb_index,
34940 - atomic_read(&ddb_entry->relogin_retry_count),
34941 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
34942 ddb_entry->default_time2wait + 4));
34943 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
34944 atomic_set(&ddb_entry->retry_relogin_timer,
34945 @@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
34946
34947 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34948 atomic_set(&ddb_entry->relogin_timer, 0);
34949 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34950 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34951
34952 ddb_entry->default_relogin_timeout =
34953 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
34954 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
34955 index 2aeb2e9..46e3925 100644
34956 --- a/drivers/scsi/scsi.c
34957 +++ b/drivers/scsi/scsi.c
34958 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
34959 unsigned long timeout;
34960 int rtn = 0;
34961
34962 - atomic_inc(&cmd->device->iorequest_cnt);
34963 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34964
34965 /* check if the device is still usable */
34966 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34967 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
34968 index f85cfa6..a57c9e8 100644
34969 --- a/drivers/scsi/scsi_lib.c
34970 +++ b/drivers/scsi/scsi_lib.c
34971 @@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
34972 shost = sdev->host;
34973 scsi_init_cmd_errh(cmd);
34974 cmd->result = DID_NO_CONNECT << 16;
34975 - atomic_inc(&cmd->device->iorequest_cnt);
34976 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34977
34978 /*
34979 * SCSI request completion path will do scsi_device_unbusy(),
34980 @@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq)
34981
34982 INIT_LIST_HEAD(&cmd->eh_entry);
34983
34984 - atomic_inc(&cmd->device->iodone_cnt);
34985 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
34986 if (cmd->result)
34987 - atomic_inc(&cmd->device->ioerr_cnt);
34988 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34989
34990 disposition = scsi_decide_disposition(cmd);
34991 if (disposition != SUCCESS &&
34992 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
34993 index 04c2a27..9d8bd66 100644
34994 --- a/drivers/scsi/scsi_sysfs.c
34995 +++ b/drivers/scsi/scsi_sysfs.c
34996 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
34997 char *buf) \
34998 { \
34999 struct scsi_device *sdev = to_scsi_device(dev); \
35000 - unsigned long long count = atomic_read(&sdev->field); \
35001 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
35002 return snprintf(buf, 20, "0x%llx\n", count); \
35003 } \
35004 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
35005 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
35006 index 84a1fdf..693b0d6 100644
35007 --- a/drivers/scsi/scsi_tgt_lib.c
35008 +++ b/drivers/scsi/scsi_tgt_lib.c
35009 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
35010 int err;
35011
35012 dprintk("%lx %u\n", uaddr, len);
35013 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
35014 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
35015 if (err) {
35016 /*
35017 * TODO: need to fixup sg_tablesize, max_segment_size,
35018 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
35019 index 1b21491..1b7f60e 100644
35020 --- a/drivers/scsi/scsi_transport_fc.c
35021 +++ b/drivers/scsi/scsi_transport_fc.c
35022 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
35023 * Netlink Infrastructure
35024 */
35025
35026 -static atomic_t fc_event_seq;
35027 +static atomic_unchecked_t fc_event_seq;
35028
35029 /**
35030 * fc_get_event_number - Obtain the next sequential FC event number
35031 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
35032 u32
35033 fc_get_event_number(void)
35034 {
35035 - return atomic_add_return(1, &fc_event_seq);
35036 + return atomic_add_return_unchecked(1, &fc_event_seq);
35037 }
35038 EXPORT_SYMBOL(fc_get_event_number);
35039
35040 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
35041 {
35042 int error;
35043
35044 - atomic_set(&fc_event_seq, 0);
35045 + atomic_set_unchecked(&fc_event_seq, 0);
35046
35047 error = transport_class_register(&fc_host_class);
35048 if (error)
35049 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
35050 char *cp;
35051
35052 *val = simple_strtoul(buf, &cp, 0);
35053 - if ((*cp && (*cp != '\n')) || (*val < 0))
35054 + if (*cp && (*cp != '\n'))
35055 return -EINVAL;
35056 /*
35057 * Check for overflow; dev_loss_tmo is u32
35058 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
35059 index 96029e6..4d77fa0 100644
35060 --- a/drivers/scsi/scsi_transport_iscsi.c
35061 +++ b/drivers/scsi/scsi_transport_iscsi.c
35062 @@ -79,7 +79,7 @@ struct iscsi_internal {
35063 struct transport_container session_cont;
35064 };
35065
35066 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
35067 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
35068 static struct workqueue_struct *iscsi_eh_timer_workq;
35069
35070 static DEFINE_IDA(iscsi_sess_ida);
35071 @@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
35072 int err;
35073
35074 ihost = shost->shost_data;
35075 - session->sid = atomic_add_return(1, &iscsi_session_nr);
35076 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
35077
35078 if (target_id == ISCSI_MAX_TARGET) {
35079 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
35080 @@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
35081 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
35082 ISCSI_TRANSPORT_VERSION);
35083
35084 - atomic_set(&iscsi_session_nr, 0);
35085 + atomic_set_unchecked(&iscsi_session_nr, 0);
35086
35087 err = class_register(&iscsi_transport_class);
35088 if (err)
35089 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
35090 index 21a045e..ec89e03 100644
35091 --- a/drivers/scsi/scsi_transport_srp.c
35092 +++ b/drivers/scsi/scsi_transport_srp.c
35093 @@ -33,7 +33,7 @@
35094 #include "scsi_transport_srp_internal.h"
35095
35096 struct srp_host_attrs {
35097 - atomic_t next_port_id;
35098 + atomic_unchecked_t next_port_id;
35099 };
35100 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
35101
35102 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
35103 struct Scsi_Host *shost = dev_to_shost(dev);
35104 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
35105
35106 - atomic_set(&srp_host->next_port_id, 0);
35107 + atomic_set_unchecked(&srp_host->next_port_id, 0);
35108 return 0;
35109 }
35110
35111 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
35112 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
35113 rport->roles = ids->roles;
35114
35115 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
35116 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
35117 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
35118
35119 transport_setup_device(&rport->dev);
35120 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
35121 index 441a1c5..07cece7 100644
35122 --- a/drivers/scsi/sg.c
35123 +++ b/drivers/scsi/sg.c
35124 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
35125 sdp->disk->disk_name,
35126 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
35127 NULL,
35128 - (char *)arg);
35129 + (char __user *)arg);
35130 case BLKTRACESTART:
35131 return blk_trace_startstop(sdp->device->request_queue, 1);
35132 case BLKTRACESTOP:
35133 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
35134 const struct file_operations * fops;
35135 };
35136
35137 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
35138 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
35139 {"allow_dio", &adio_fops},
35140 {"debug", &debug_fops},
35141 {"def_reserved_size", &dressz_fops},
35142 @@ -2327,7 +2327,7 @@ sg_proc_init(void)
35143 {
35144 int k, mask;
35145 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
35146 - struct sg_proc_leaf * leaf;
35147 + const struct sg_proc_leaf * leaf;
35148
35149 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
35150 if (!sg_proc_sgp)
35151 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
35152 index f64250e..1ee3049 100644
35153 --- a/drivers/spi/spi-dw-pci.c
35154 +++ b/drivers/spi/spi-dw-pci.c
35155 @@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
35156 #define spi_resume NULL
35157 #endif
35158
35159 -static const struct pci_device_id pci_ids[] __devinitdata = {
35160 +static const struct pci_device_id pci_ids[] __devinitconst = {
35161 /* Intel MID platform SPI controller 0 */
35162 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
35163 {},
35164 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
35165 index 77eae99..b7cdcc9 100644
35166 --- a/drivers/spi/spi.c
35167 +++ b/drivers/spi/spi.c
35168 @@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
35169 EXPORT_SYMBOL_GPL(spi_bus_unlock);
35170
35171 /* portable code must never pass more than 32 bytes */
35172 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
35173 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
35174
35175 static u8 *buf;
35176
35177 diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
35178 index 436fe97..4082570 100644
35179 --- a/drivers/staging/gma500/power.c
35180 +++ b/drivers/staging/gma500/power.c
35181 @@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
35182 ret = gma_resume_pci(dev->pdev);
35183 if (ret == 0) {
35184 /* FIXME: we want to defer this for Medfield/Oaktrail */
35185 - gma_resume_display(dev);
35186 + gma_resume_display(dev->pdev);
35187 psb_irq_preinstall(dev);
35188 psb_irq_postinstall(dev);
35189 pm_runtime_get(&dev->pdev->dev);
35190 diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
35191 index bafccb3..e3ac78d 100644
35192 --- a/drivers/staging/hv/rndis_filter.c
35193 +++ b/drivers/staging/hv/rndis_filter.c
35194 @@ -42,7 +42,7 @@ struct rndis_device {
35195
35196 enum rndis_device_state state;
35197 bool link_state;
35198 - atomic_t new_req_id;
35199 + atomic_unchecked_t new_req_id;
35200
35201 spinlock_t request_lock;
35202 struct list_head req_list;
35203 @@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35204 * template
35205 */
35206 set = &rndis_msg->msg.set_req;
35207 - set->req_id = atomic_inc_return(&dev->new_req_id);
35208 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35209
35210 /* Add to the request list */
35211 spin_lock_irqsave(&dev->request_lock, flags);
35212 @@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35213
35214 /* Setup the rndis set */
35215 halt = &request->request_msg.msg.halt_req;
35216 - halt->req_id = atomic_inc_return(&dev->new_req_id);
35217 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35218
35219 /* Ignore return since this msg is optional. */
35220 rndis_filter_send_request(dev, request);
35221 diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
35222 index 9e8f010..af9efb5 100644
35223 --- a/drivers/staging/iio/buffer_generic.h
35224 +++ b/drivers/staging/iio/buffer_generic.h
35225 @@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
35226
35227 int (*is_enabled)(struct iio_buffer *buffer);
35228 int (*enable)(struct iio_buffer *buffer);
35229 -};
35230 +} __no_const;
35231
35232 /**
35233 * struct iio_buffer_setup_ops - buffer setup related callbacks
35234 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
35235 index 8b307b4..a97ac91 100644
35236 --- a/drivers/staging/octeon/ethernet-rx.c
35237 +++ b/drivers/staging/octeon/ethernet-rx.c
35238 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35239 /* Increment RX stats for virtual ports */
35240 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35241 #ifdef CONFIG_64BIT
35242 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35243 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35244 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35245 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35246 #else
35247 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35248 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35249 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35250 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35251 #endif
35252 }
35253 netif_receive_skb(skb);
35254 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35255 dev->name);
35256 */
35257 #ifdef CONFIG_64BIT
35258 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35259 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35260 #else
35261 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35262 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35263 #endif
35264 dev_kfree_skb_irq(skb);
35265 }
35266 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
35267 index 076f866..2308070 100644
35268 --- a/drivers/staging/octeon/ethernet.c
35269 +++ b/drivers/staging/octeon/ethernet.c
35270 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
35271 * since the RX tasklet also increments it.
35272 */
35273 #ifdef CONFIG_64BIT
35274 - atomic64_add(rx_status.dropped_packets,
35275 - (atomic64_t *)&priv->stats.rx_dropped);
35276 + atomic64_add_unchecked(rx_status.dropped_packets,
35277 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35278 #else
35279 - atomic_add(rx_status.dropped_packets,
35280 - (atomic_t *)&priv->stats.rx_dropped);
35281 + atomic_add_unchecked(rx_status.dropped_packets,
35282 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
35283 #endif
35284 }
35285
35286 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
35287 index 7a19555..466456d 100644
35288 --- a/drivers/staging/pohmelfs/inode.c
35289 +++ b/drivers/staging/pohmelfs/inode.c
35290 @@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35291 mutex_init(&psb->mcache_lock);
35292 psb->mcache_root = RB_ROOT;
35293 psb->mcache_timeout = msecs_to_jiffies(5000);
35294 - atomic_long_set(&psb->mcache_gen, 0);
35295 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
35296
35297 psb->trans_max_pages = 100;
35298
35299 @@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35300 INIT_LIST_HEAD(&psb->crypto_ready_list);
35301 INIT_LIST_HEAD(&psb->crypto_active_list);
35302
35303 - atomic_set(&psb->trans_gen, 1);
35304 + atomic_set_unchecked(&psb->trans_gen, 1);
35305 atomic_long_set(&psb->total_inodes, 0);
35306
35307 mutex_init(&psb->state_lock);
35308 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
35309 index e22665c..a2a9390 100644
35310 --- a/drivers/staging/pohmelfs/mcache.c
35311 +++ b/drivers/staging/pohmelfs/mcache.c
35312 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
35313 m->data = data;
35314 m->start = start;
35315 m->size = size;
35316 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
35317 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35318
35319 mutex_lock(&psb->mcache_lock);
35320 err = pohmelfs_mcache_insert(psb, m);
35321 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
35322 index 985b6b7..7699e05 100644
35323 --- a/drivers/staging/pohmelfs/netfs.h
35324 +++ b/drivers/staging/pohmelfs/netfs.h
35325 @@ -571,14 +571,14 @@ struct pohmelfs_config;
35326 struct pohmelfs_sb {
35327 struct rb_root mcache_root;
35328 struct mutex mcache_lock;
35329 - atomic_long_t mcache_gen;
35330 + atomic_long_unchecked_t mcache_gen;
35331 unsigned long mcache_timeout;
35332
35333 unsigned int idx;
35334
35335 unsigned int trans_retries;
35336
35337 - atomic_t trans_gen;
35338 + atomic_unchecked_t trans_gen;
35339
35340 unsigned int crypto_attached_size;
35341 unsigned int crypto_align_size;
35342 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
35343 index 06c1a74..866eebc 100644
35344 --- a/drivers/staging/pohmelfs/trans.c
35345 +++ b/drivers/staging/pohmelfs/trans.c
35346 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
35347 int err;
35348 struct netfs_cmd *cmd = t->iovec.iov_base;
35349
35350 - t->gen = atomic_inc_return(&psb->trans_gen);
35351 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35352
35353 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35354 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35355 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
35356 index 86308a0..feaa925 100644
35357 --- a/drivers/staging/rtl8712/rtl871x_io.h
35358 +++ b/drivers/staging/rtl8712/rtl871x_io.h
35359 @@ -108,7 +108,7 @@ struct _io_ops {
35360 u8 *pmem);
35361 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35362 u8 *pmem);
35363 -};
35364 +} __no_const;
35365
35366 struct io_req {
35367 struct list_head list;
35368 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
35369 index c7b5e8b..783d6cb 100644
35370 --- a/drivers/staging/sbe-2t3e3/netdev.c
35371 +++ b/drivers/staging/sbe-2t3e3/netdev.c
35372 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35373 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35374
35375 if (rlen)
35376 - if (copy_to_user(data, &resp, rlen))
35377 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35378 return -EFAULT;
35379
35380 return 0;
35381 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
35382 index be21617..0954e45 100644
35383 --- a/drivers/staging/usbip/usbip_common.h
35384 +++ b/drivers/staging/usbip/usbip_common.h
35385 @@ -289,7 +289,7 @@ struct usbip_device {
35386 void (*shutdown)(struct usbip_device *);
35387 void (*reset)(struct usbip_device *);
35388 void (*unusable)(struct usbip_device *);
35389 - } eh_ops;
35390 + } __no_const eh_ops;
35391 };
35392
35393 #if 0
35394 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
35395 index 88b3298..3783eee 100644
35396 --- a/drivers/staging/usbip/vhci.h
35397 +++ b/drivers/staging/usbip/vhci.h
35398 @@ -88,7 +88,7 @@ struct vhci_hcd {
35399 unsigned resuming:1;
35400 unsigned long re_timeout;
35401
35402 - atomic_t seqnum;
35403 + atomic_unchecked_t seqnum;
35404
35405 /*
35406 * NOTE:
35407 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
35408 index 2ee97e2..0420b86 100644
35409 --- a/drivers/staging/usbip/vhci_hcd.c
35410 +++ b/drivers/staging/usbip/vhci_hcd.c
35411 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35412 return;
35413 }
35414
35415 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35416 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35417 if (priv->seqnum == 0xffff)
35418 dev_info(&urb->dev->dev, "seqnum max\n");
35419
35420 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
35421 return -ENOMEM;
35422 }
35423
35424 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35425 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35426 if (unlink->seqnum == 0xffff)
35427 pr_info("seqnum max\n");
35428
35429 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
35430 vdev->rhport = rhport;
35431 }
35432
35433 - atomic_set(&vhci->seqnum, 0);
35434 + atomic_set_unchecked(&vhci->seqnum, 0);
35435 spin_lock_init(&vhci->lock);
35436
35437 hcd->power_budget = 0; /* no limit */
35438 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
35439 index 3872b8c..fe6d2f4 100644
35440 --- a/drivers/staging/usbip/vhci_rx.c
35441 +++ b/drivers/staging/usbip/vhci_rx.c
35442 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
35443 if (!urb) {
35444 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35445 pr_info("max seqnum %d\n",
35446 - atomic_read(&the_controller->seqnum));
35447 + atomic_read_unchecked(&the_controller->seqnum));
35448 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35449 return;
35450 }
35451 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
35452 index 7735027..30eed13 100644
35453 --- a/drivers/staging/vt6655/hostap.c
35454 +++ b/drivers/staging/vt6655/hostap.c
35455 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
35456 *
35457 */
35458
35459 +static net_device_ops_no_const apdev_netdev_ops;
35460 +
35461 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35462 {
35463 PSDevice apdev_priv;
35464 struct net_device *dev = pDevice->dev;
35465 int ret;
35466 - const struct net_device_ops apdev_netdev_ops = {
35467 - .ndo_start_xmit = pDevice->tx_80211,
35468 - };
35469
35470 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35471
35472 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35473 *apdev_priv = *pDevice;
35474 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35475
35476 + /* only half broken now */
35477 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35478 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35479
35480 pDevice->apdev->type = ARPHRD_IEEE80211;
35481 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
35482 index 51b5adf..098e320 100644
35483 --- a/drivers/staging/vt6656/hostap.c
35484 +++ b/drivers/staging/vt6656/hostap.c
35485 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
35486 *
35487 */
35488
35489 +static net_device_ops_no_const apdev_netdev_ops;
35490 +
35491 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35492 {
35493 PSDevice apdev_priv;
35494 struct net_device *dev = pDevice->dev;
35495 int ret;
35496 - const struct net_device_ops apdev_netdev_ops = {
35497 - .ndo_start_xmit = pDevice->tx_80211,
35498 - };
35499
35500 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35501
35502 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35503 *apdev_priv = *pDevice;
35504 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35505
35506 + /* only half broken now */
35507 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35508 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35509
35510 pDevice->apdev->type = ARPHRD_IEEE80211;
35511 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
35512 index 7843dfd..3db105f 100644
35513 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
35514 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
35515 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
35516
35517 struct usbctlx_completor {
35518 int (*complete) (struct usbctlx_completor *);
35519 -};
35520 +} __no_const;
35521
35522 static int
35523 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35524 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
35525 index 1ca66ea..76f1343 100644
35526 --- a/drivers/staging/zcache/tmem.c
35527 +++ b/drivers/staging/zcache/tmem.c
35528 @@ -39,7 +39,7 @@
35529 * A tmem host implementation must use this function to register callbacks
35530 * for memory allocation.
35531 */
35532 -static struct tmem_hostops tmem_hostops;
35533 +static tmem_hostops_no_const tmem_hostops;
35534
35535 static void tmem_objnode_tree_init(void);
35536
35537 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
35538 * A tmem host implementation must use this function to register
35539 * callbacks for a page-accessible memory (PAM) implementation
35540 */
35541 -static struct tmem_pamops tmem_pamops;
35542 +static tmem_pamops_no_const tmem_pamops;
35543
35544 void tmem_register_pamops(struct tmem_pamops *m)
35545 {
35546 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
35547 index ed147c4..94fc3c6 100644
35548 --- a/drivers/staging/zcache/tmem.h
35549 +++ b/drivers/staging/zcache/tmem.h
35550 @@ -180,6 +180,7 @@ struct tmem_pamops {
35551 void (*new_obj)(struct tmem_obj *);
35552 int (*replace_in_obj)(void *, struct tmem_obj *);
35553 };
35554 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35555 extern void tmem_register_pamops(struct tmem_pamops *m);
35556
35557 /* memory allocation methods provided by the host implementation */
35558 @@ -189,6 +190,7 @@ struct tmem_hostops {
35559 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35560 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35561 };
35562 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35563 extern void tmem_register_hostops(struct tmem_hostops *m);
35564
35565 /* core tmem accessor functions */
35566 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
35567 index 0c1d5c73..88e90a8 100644
35568 --- a/drivers/target/iscsi/iscsi_target.c
35569 +++ b/drivers/target/iscsi/iscsi_target.c
35570 @@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
35571 * outstanding_r2ts reaches zero, go ahead and send the delayed
35572 * TASK_ABORTED status.
35573 */
35574 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35575 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35576 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35577 if (--cmd->outstanding_r2ts < 1) {
35578 iscsit_stop_dataout_timer(cmd);
35579 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
35580 index 6845228..df77141 100644
35581 --- a/drivers/target/target_core_tmr.c
35582 +++ b/drivers/target/target_core_tmr.c
35583 @@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
35584 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35585 cmd->t_task_list_num,
35586 atomic_read(&cmd->t_task_cdbs_left),
35587 - atomic_read(&cmd->t_task_cdbs_sent),
35588 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35589 atomic_read(&cmd->t_transport_active),
35590 atomic_read(&cmd->t_transport_stop),
35591 atomic_read(&cmd->t_transport_sent));
35592 @@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
35593 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35594 " task: %p, t_fe_count: %d dev: %p\n", task,
35595 fe_count, dev);
35596 - atomic_set(&cmd->t_transport_aborted, 1);
35597 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35598 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35599
35600 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35601 @@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
35602 }
35603 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35604 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35605 - atomic_set(&cmd->t_transport_aborted, 1);
35606 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35607 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35608
35609 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35610 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
35611 index e4ddb93..2fc6e0f 100644
35612 --- a/drivers/target/target_core_transport.c
35613 +++ b/drivers/target/target_core_transport.c
35614 @@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
35615
35616 dev->queue_depth = dev_limits->queue_depth;
35617 atomic_set(&dev->depth_left, dev->queue_depth);
35618 - atomic_set(&dev->dev_ordered_id, 0);
35619 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
35620
35621 se_dev_set_default_attribs(dev, dev_limits);
35622
35623 @@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
35624 * Used to determine when ORDERED commands should go from
35625 * Dormant to Active status.
35626 */
35627 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35628 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35629 smp_mb__after_atomic_inc();
35630 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35631 cmd->se_ordered_id, cmd->sam_task_attr,
35632 @@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
35633 " t_transport_active: %d t_transport_stop: %d"
35634 " t_transport_sent: %d\n", cmd->t_task_list_num,
35635 atomic_read(&cmd->t_task_cdbs_left),
35636 - atomic_read(&cmd->t_task_cdbs_sent),
35637 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35638 atomic_read(&cmd->t_task_cdbs_ex_left),
35639 atomic_read(&cmd->t_transport_active),
35640 atomic_read(&cmd->t_transport_stop),
35641 @@ -2089,9 +2089,9 @@ check_depth:
35642
35643 spin_lock_irqsave(&cmd->t_state_lock, flags);
35644 task->task_flags |= (TF_ACTIVE | TF_SENT);
35645 - atomic_inc(&cmd->t_task_cdbs_sent);
35646 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35647
35648 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
35649 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35650 cmd->t_task_list_num)
35651 atomic_set(&cmd->t_transport_sent, 1);
35652
35653 @@ -4296,7 +4296,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
35654 atomic_set(&cmd->transport_lun_stop, 0);
35655 }
35656 if (!atomic_read(&cmd->t_transport_active) ||
35657 - atomic_read(&cmd->t_transport_aborted)) {
35658 + atomic_read_unchecked(&cmd->t_transport_aborted)) {
35659 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35660 return false;
35661 }
35662 @@ -4545,7 +4545,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
35663 {
35664 int ret = 0;
35665
35666 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
35667 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35668 if (!send_status ||
35669 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35670 return 1;
35671 @@ -4582,7 +4582,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
35672 */
35673 if (cmd->data_direction == DMA_TO_DEVICE) {
35674 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35675 - atomic_inc(&cmd->t_transport_aborted);
35676 + atomic_inc_unchecked(&cmd->t_transport_aborted);
35677 smp_mb__after_atomic_inc();
35678 }
35679 }
35680 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
35681 index b9040be..e3f5aab 100644
35682 --- a/drivers/tty/hvc/hvcs.c
35683 +++ b/drivers/tty/hvc/hvcs.c
35684 @@ -83,6 +83,7 @@
35685 #include <asm/hvcserver.h>
35686 #include <asm/uaccess.h>
35687 #include <asm/vio.h>
35688 +#include <asm/local.h>
35689
35690 /*
35691 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35692 @@ -270,7 +271,7 @@ struct hvcs_struct {
35693 unsigned int index;
35694
35695 struct tty_struct *tty;
35696 - int open_count;
35697 + local_t open_count;
35698
35699 /*
35700 * Used to tell the driver kernel_thread what operations need to take
35701 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
35702
35703 spin_lock_irqsave(&hvcsd->lock, flags);
35704
35705 - if (hvcsd->open_count > 0) {
35706 + if (local_read(&hvcsd->open_count) > 0) {
35707 spin_unlock_irqrestore(&hvcsd->lock, flags);
35708 printk(KERN_INFO "HVCS: vterm state unchanged. "
35709 "The hvcs device node is still in use.\n");
35710 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
35711 if ((retval = hvcs_partner_connect(hvcsd)))
35712 goto error_release;
35713
35714 - hvcsd->open_count = 1;
35715 + local_set(&hvcsd->open_count, 1);
35716 hvcsd->tty = tty;
35717 tty->driver_data = hvcsd;
35718
35719 @@ -1179,7 +1180,7 @@ fast_open:
35720
35721 spin_lock_irqsave(&hvcsd->lock, flags);
35722 kref_get(&hvcsd->kref);
35723 - hvcsd->open_count++;
35724 + local_inc(&hvcsd->open_count);
35725 hvcsd->todo_mask |= HVCS_SCHED_READ;
35726 spin_unlock_irqrestore(&hvcsd->lock, flags);
35727
35728 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35729 hvcsd = tty->driver_data;
35730
35731 spin_lock_irqsave(&hvcsd->lock, flags);
35732 - if (--hvcsd->open_count == 0) {
35733 + if (local_dec_and_test(&hvcsd->open_count)) {
35734
35735 vio_disable_interrupts(hvcsd->vdev);
35736
35737 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35738 free_irq(irq, hvcsd);
35739 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35740 return;
35741 - } else if (hvcsd->open_count < 0) {
35742 + } else if (local_read(&hvcsd->open_count) < 0) {
35743 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35744 " is missmanaged.\n",
35745 - hvcsd->vdev->unit_address, hvcsd->open_count);
35746 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35747 }
35748
35749 spin_unlock_irqrestore(&hvcsd->lock, flags);
35750 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35751
35752 spin_lock_irqsave(&hvcsd->lock, flags);
35753 /* Preserve this so that we know how many kref refs to put */
35754 - temp_open_count = hvcsd->open_count;
35755 + temp_open_count = local_read(&hvcsd->open_count);
35756
35757 /*
35758 * Don't kref put inside the spinlock because the destruction
35759 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35760 hvcsd->tty->driver_data = NULL;
35761 hvcsd->tty = NULL;
35762
35763 - hvcsd->open_count = 0;
35764 + local_set(&hvcsd->open_count, 0);
35765
35766 /* This will drop any buffered data on the floor which is OK in a hangup
35767 * scenario. */
35768 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
35769 * the middle of a write operation? This is a crummy place to do this
35770 * but we want to keep it all in the spinlock.
35771 */
35772 - if (hvcsd->open_count <= 0) {
35773 + if (local_read(&hvcsd->open_count) <= 0) {
35774 spin_unlock_irqrestore(&hvcsd->lock, flags);
35775 return -ENODEV;
35776 }
35777 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
35778 {
35779 struct hvcs_struct *hvcsd = tty->driver_data;
35780
35781 - if (!hvcsd || hvcsd->open_count <= 0)
35782 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35783 return 0;
35784
35785 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35786 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
35787 index ef92869..f4ebd88 100644
35788 --- a/drivers/tty/ipwireless/tty.c
35789 +++ b/drivers/tty/ipwireless/tty.c
35790 @@ -29,6 +29,7 @@
35791 #include <linux/tty_driver.h>
35792 #include <linux/tty_flip.h>
35793 #include <linux/uaccess.h>
35794 +#include <asm/local.h>
35795
35796 #include "tty.h"
35797 #include "network.h"
35798 @@ -51,7 +52,7 @@ struct ipw_tty {
35799 int tty_type;
35800 struct ipw_network *network;
35801 struct tty_struct *linux_tty;
35802 - int open_count;
35803 + local_t open_count;
35804 unsigned int control_lines;
35805 struct mutex ipw_tty_mutex;
35806 int tx_bytes_queued;
35807 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35808 mutex_unlock(&tty->ipw_tty_mutex);
35809 return -ENODEV;
35810 }
35811 - if (tty->open_count == 0)
35812 + if (local_read(&tty->open_count) == 0)
35813 tty->tx_bytes_queued = 0;
35814
35815 - tty->open_count++;
35816 + local_inc(&tty->open_count);
35817
35818 tty->linux_tty = linux_tty;
35819 linux_tty->driver_data = tty;
35820 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35821
35822 static void do_ipw_close(struct ipw_tty *tty)
35823 {
35824 - tty->open_count--;
35825 -
35826 - if (tty->open_count == 0) {
35827 + if (local_dec_return(&tty->open_count) == 0) {
35828 struct tty_struct *linux_tty = tty->linux_tty;
35829
35830 if (linux_tty != NULL) {
35831 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
35832 return;
35833
35834 mutex_lock(&tty->ipw_tty_mutex);
35835 - if (tty->open_count == 0) {
35836 + if (local_read(&tty->open_count) == 0) {
35837 mutex_unlock(&tty->ipw_tty_mutex);
35838 return;
35839 }
35840 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
35841 return;
35842 }
35843
35844 - if (!tty->open_count) {
35845 + if (!local_read(&tty->open_count)) {
35846 mutex_unlock(&tty->ipw_tty_mutex);
35847 return;
35848 }
35849 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
35850 return -ENODEV;
35851
35852 mutex_lock(&tty->ipw_tty_mutex);
35853 - if (!tty->open_count) {
35854 + if (!local_read(&tty->open_count)) {
35855 mutex_unlock(&tty->ipw_tty_mutex);
35856 return -EINVAL;
35857 }
35858 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
35859 if (!tty)
35860 return -ENODEV;
35861
35862 - if (!tty->open_count)
35863 + if (!local_read(&tty->open_count))
35864 return -EINVAL;
35865
35866 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35867 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
35868 if (!tty)
35869 return 0;
35870
35871 - if (!tty->open_count)
35872 + if (!local_read(&tty->open_count))
35873 return 0;
35874
35875 return tty->tx_bytes_queued;
35876 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
35877 if (!tty)
35878 return -ENODEV;
35879
35880 - if (!tty->open_count)
35881 + if (!local_read(&tty->open_count))
35882 return -EINVAL;
35883
35884 return get_control_lines(tty);
35885 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
35886 if (!tty)
35887 return -ENODEV;
35888
35889 - if (!tty->open_count)
35890 + if (!local_read(&tty->open_count))
35891 return -EINVAL;
35892
35893 return set_control_lines(tty, set, clear);
35894 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
35895 if (!tty)
35896 return -ENODEV;
35897
35898 - if (!tty->open_count)
35899 + if (!local_read(&tty->open_count))
35900 return -EINVAL;
35901
35902 /* FIXME: Exactly how is the tty object locked here .. */
35903 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
35904 against a parallel ioctl etc */
35905 mutex_lock(&ttyj->ipw_tty_mutex);
35906 }
35907 - while (ttyj->open_count)
35908 + while (local_read(&ttyj->open_count))
35909 do_ipw_close(ttyj);
35910 ipwireless_disassociate_network_ttys(network,
35911 ttyj->channel_idx);
35912 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
35913 index fc7bbba..9527e93 100644
35914 --- a/drivers/tty/n_gsm.c
35915 +++ b/drivers/tty/n_gsm.c
35916 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
35917 kref_init(&dlci->ref);
35918 mutex_init(&dlci->mutex);
35919 dlci->fifo = &dlci->_fifo;
35920 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35921 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35922 kfree(dlci);
35923 return NULL;
35924 }
35925 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
35926 index 39d6ab6..eb97f41 100644
35927 --- a/drivers/tty/n_tty.c
35928 +++ b/drivers/tty/n_tty.c
35929 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
35930 {
35931 *ops = tty_ldisc_N_TTY;
35932 ops->owner = NULL;
35933 - ops->refcount = ops->flags = 0;
35934 + atomic_set(&ops->refcount, 0);
35935 + ops->flags = 0;
35936 }
35937 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35938 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
35939 index e18604b..a7d5a11 100644
35940 --- a/drivers/tty/pty.c
35941 +++ b/drivers/tty/pty.c
35942 @@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35943 register_sysctl_table(pty_root_table);
35944
35945 /* Now create the /dev/ptmx special device */
35946 + pax_open_kernel();
35947 tty_default_fops(&ptmx_fops);
35948 - ptmx_fops.open = ptmx_open;
35949 + *(void **)&ptmx_fops.open = ptmx_open;
35950 + pax_close_kernel();
35951
35952 cdev_init(&ptmx_cdev, &ptmx_fops);
35953 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35954 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
35955 index 2b42a01..32a2ed3 100644
35956 --- a/drivers/tty/serial/kgdboc.c
35957 +++ b/drivers/tty/serial/kgdboc.c
35958 @@ -24,8 +24,9 @@
35959 #define MAX_CONFIG_LEN 40
35960
35961 static struct kgdb_io kgdboc_io_ops;
35962 +static struct kgdb_io kgdboc_io_ops_console;
35963
35964 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35965 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35966 static int configured = -1;
35967
35968 static char config[MAX_CONFIG_LEN];
35969 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
35970 kgdboc_unregister_kbd();
35971 if (configured == 1)
35972 kgdb_unregister_io_module(&kgdboc_io_ops);
35973 + else if (configured == 2)
35974 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
35975 }
35976
35977 static int configure_kgdboc(void)
35978 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
35979 int err;
35980 char *cptr = config;
35981 struct console *cons;
35982 + int is_console = 0;
35983
35984 err = kgdboc_option_setup(config);
35985 if (err || !strlen(config) || isspace(config[0]))
35986 goto noconfig;
35987
35988 err = -ENODEV;
35989 - kgdboc_io_ops.is_console = 0;
35990 kgdb_tty_driver = NULL;
35991
35992 kgdboc_use_kms = 0;
35993 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
35994 int idx;
35995 if (cons->device && cons->device(cons, &idx) == p &&
35996 idx == tty_line) {
35997 - kgdboc_io_ops.is_console = 1;
35998 + is_console = 1;
35999 break;
36000 }
36001 cons = cons->next;
36002 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
36003 kgdb_tty_line = tty_line;
36004
36005 do_register:
36006 - err = kgdb_register_io_module(&kgdboc_io_ops);
36007 + if (is_console) {
36008 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
36009 + configured = 2;
36010 + } else {
36011 + err = kgdb_register_io_module(&kgdboc_io_ops);
36012 + configured = 1;
36013 + }
36014 if (err)
36015 goto noconfig;
36016
36017 - configured = 1;
36018 -
36019 return 0;
36020
36021 noconfig:
36022 @@ -213,7 +220,7 @@ noconfig:
36023 static int __init init_kgdboc(void)
36024 {
36025 /* Already configured? */
36026 - if (configured == 1)
36027 + if (configured >= 1)
36028 return 0;
36029
36030 return configure_kgdboc();
36031 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
36032 if (config[len - 1] == '\n')
36033 config[len - 1] = '\0';
36034
36035 - if (configured == 1)
36036 + if (configured >= 1)
36037 cleanup_kgdboc();
36038
36039 /* Go and configure with the new params. */
36040 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
36041 .post_exception = kgdboc_post_exp_handler,
36042 };
36043
36044 +static struct kgdb_io kgdboc_io_ops_console = {
36045 + .name = "kgdboc",
36046 + .read_char = kgdboc_get_char,
36047 + .write_char = kgdboc_put_char,
36048 + .pre_exception = kgdboc_pre_exp_handler,
36049 + .post_exception = kgdboc_post_exp_handler,
36050 + .is_console = 1
36051 +};
36052 +
36053 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
36054 /* This is only available if kgdboc is a built in for early debugging */
36055 static int __init kgdboc_early_init(char *opt)
36056 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
36057 index 05085be..67eadb0 100644
36058 --- a/drivers/tty/tty_io.c
36059 +++ b/drivers/tty/tty_io.c
36060 @@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
36061
36062 void tty_default_fops(struct file_operations *fops)
36063 {
36064 - *fops = tty_fops;
36065 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
36066 }
36067
36068 /*
36069 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
36070 index 8e0924f..4204eb4 100644
36071 --- a/drivers/tty/tty_ldisc.c
36072 +++ b/drivers/tty/tty_ldisc.c
36073 @@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
36074 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
36075 struct tty_ldisc_ops *ldo = ld->ops;
36076
36077 - ldo->refcount--;
36078 + atomic_dec(&ldo->refcount);
36079 module_put(ldo->owner);
36080 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36081
36082 @@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
36083 spin_lock_irqsave(&tty_ldisc_lock, flags);
36084 tty_ldiscs[disc] = new_ldisc;
36085 new_ldisc->num = disc;
36086 - new_ldisc->refcount = 0;
36087 + atomic_set(&new_ldisc->refcount, 0);
36088 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36089
36090 return ret;
36091 @@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
36092 return -EINVAL;
36093
36094 spin_lock_irqsave(&tty_ldisc_lock, flags);
36095 - if (tty_ldiscs[disc]->refcount)
36096 + if (atomic_read(&tty_ldiscs[disc]->refcount))
36097 ret = -EBUSY;
36098 else
36099 tty_ldiscs[disc] = NULL;
36100 @@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
36101 if (ldops) {
36102 ret = ERR_PTR(-EAGAIN);
36103 if (try_module_get(ldops->owner)) {
36104 - ldops->refcount++;
36105 + atomic_inc(&ldops->refcount);
36106 ret = ldops;
36107 }
36108 }
36109 @@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
36110 unsigned long flags;
36111
36112 spin_lock_irqsave(&tty_ldisc_lock, flags);
36113 - ldops->refcount--;
36114 + atomic_dec(&ldops->refcount);
36115 module_put(ldops->owner);
36116 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36117 }
36118 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
36119 index a605549..6bd3c96 100644
36120 --- a/drivers/tty/vt/keyboard.c
36121 +++ b/drivers/tty/vt/keyboard.c
36122 @@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
36123 kbd->kbdmode == VC_OFF) &&
36124 value != KVAL(K_SAK))
36125 return; /* SAK is allowed even in raw mode */
36126 +
36127 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
36128 + {
36129 + void *func = fn_handler[value];
36130 + if (func == fn_show_state || func == fn_show_ptregs ||
36131 + func == fn_show_mem)
36132 + return;
36133 + }
36134 +#endif
36135 +
36136 fn_handler[value](vc);
36137 }
36138
36139 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
36140 index 65447c5..0526f0a 100644
36141 --- a/drivers/tty/vt/vt_ioctl.c
36142 +++ b/drivers/tty/vt/vt_ioctl.c
36143 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
36144 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
36145 return -EFAULT;
36146
36147 - if (!capable(CAP_SYS_TTY_CONFIG))
36148 - perm = 0;
36149 -
36150 switch (cmd) {
36151 case KDGKBENT:
36152 key_map = key_maps[s];
36153 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
36154 val = (i ? K_HOLE : K_NOSUCHMAP);
36155 return put_user(val, &user_kbe->kb_value);
36156 case KDSKBENT:
36157 + if (!capable(CAP_SYS_TTY_CONFIG))
36158 + perm = 0;
36159 +
36160 if (!perm)
36161 return -EPERM;
36162 if (!i && v == K_NOSUCHMAP) {
36163 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
36164 int i, j, k;
36165 int ret;
36166
36167 - if (!capable(CAP_SYS_TTY_CONFIG))
36168 - perm = 0;
36169 -
36170 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
36171 if (!kbs) {
36172 ret = -ENOMEM;
36173 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
36174 kfree(kbs);
36175 return ((p && *p) ? -EOVERFLOW : 0);
36176 case KDSKBSENT:
36177 + if (!capable(CAP_SYS_TTY_CONFIG))
36178 + perm = 0;
36179 +
36180 if (!perm) {
36181 ret = -EPERM;
36182 goto reterr;
36183 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
36184 index a783d53..cb30d94 100644
36185 --- a/drivers/uio/uio.c
36186 +++ b/drivers/uio/uio.c
36187 @@ -25,6 +25,7 @@
36188 #include <linux/kobject.h>
36189 #include <linux/cdev.h>
36190 #include <linux/uio_driver.h>
36191 +#include <asm/local.h>
36192
36193 #define UIO_MAX_DEVICES (1U << MINORBITS)
36194
36195 @@ -32,10 +33,10 @@ struct uio_device {
36196 struct module *owner;
36197 struct device *dev;
36198 int minor;
36199 - atomic_t event;
36200 + atomic_unchecked_t event;
36201 struct fasync_struct *async_queue;
36202 wait_queue_head_t wait;
36203 - int vma_count;
36204 + local_t vma_count;
36205 struct uio_info *info;
36206 struct kobject *map_dir;
36207 struct kobject *portio_dir;
36208 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
36209 struct device_attribute *attr, char *buf)
36210 {
36211 struct uio_device *idev = dev_get_drvdata(dev);
36212 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
36213 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
36214 }
36215
36216 static struct device_attribute uio_class_attributes[] = {
36217 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
36218 {
36219 struct uio_device *idev = info->uio_dev;
36220
36221 - atomic_inc(&idev->event);
36222 + atomic_inc_unchecked(&idev->event);
36223 wake_up_interruptible(&idev->wait);
36224 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
36225 }
36226 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
36227 }
36228
36229 listener->dev = idev;
36230 - listener->event_count = atomic_read(&idev->event);
36231 + listener->event_count = atomic_read_unchecked(&idev->event);
36232 filep->private_data = listener;
36233
36234 if (idev->info->open) {
36235 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
36236 return -EIO;
36237
36238 poll_wait(filep, &idev->wait, wait);
36239 - if (listener->event_count != atomic_read(&idev->event))
36240 + if (listener->event_count != atomic_read_unchecked(&idev->event))
36241 return POLLIN | POLLRDNORM;
36242 return 0;
36243 }
36244 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
36245 do {
36246 set_current_state(TASK_INTERRUPTIBLE);
36247
36248 - event_count = atomic_read(&idev->event);
36249 + event_count = atomic_read_unchecked(&idev->event);
36250 if (event_count != listener->event_count) {
36251 if (copy_to_user(buf, &event_count, count))
36252 retval = -EFAULT;
36253 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
36254 static void uio_vma_open(struct vm_area_struct *vma)
36255 {
36256 struct uio_device *idev = vma->vm_private_data;
36257 - idev->vma_count++;
36258 + local_inc(&idev->vma_count);
36259 }
36260
36261 static void uio_vma_close(struct vm_area_struct *vma)
36262 {
36263 struct uio_device *idev = vma->vm_private_data;
36264 - idev->vma_count--;
36265 + local_dec(&idev->vma_count);
36266 }
36267
36268 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36269 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
36270 idev->owner = owner;
36271 idev->info = info;
36272 init_waitqueue_head(&idev->wait);
36273 - atomic_set(&idev->event, 0);
36274 + atomic_set_unchecked(&idev->event, 0);
36275
36276 ret = uio_get_minor(idev);
36277 if (ret)
36278 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
36279 index a845f8b..4f54072 100644
36280 --- a/drivers/usb/atm/cxacru.c
36281 +++ b/drivers/usb/atm/cxacru.c
36282 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
36283 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
36284 if (ret < 2)
36285 return -EINVAL;
36286 - if (index < 0 || index > 0x7f)
36287 + if (index > 0x7f)
36288 return -EINVAL;
36289 pos += tmp;
36290
36291 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
36292 index d3448ca..d2864ca 100644
36293 --- a/drivers/usb/atm/usbatm.c
36294 +++ b/drivers/usb/atm/usbatm.c
36295 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36296 if (printk_ratelimit())
36297 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36298 __func__, vpi, vci);
36299 - atomic_inc(&vcc->stats->rx_err);
36300 + atomic_inc_unchecked(&vcc->stats->rx_err);
36301 return;
36302 }
36303
36304 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36305 if (length > ATM_MAX_AAL5_PDU) {
36306 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36307 __func__, length, vcc);
36308 - atomic_inc(&vcc->stats->rx_err);
36309 + atomic_inc_unchecked(&vcc->stats->rx_err);
36310 goto out;
36311 }
36312
36313 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36314 if (sarb->len < pdu_length) {
36315 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36316 __func__, pdu_length, sarb->len, vcc);
36317 - atomic_inc(&vcc->stats->rx_err);
36318 + atomic_inc_unchecked(&vcc->stats->rx_err);
36319 goto out;
36320 }
36321
36322 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36323 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36324 __func__, vcc);
36325 - atomic_inc(&vcc->stats->rx_err);
36326 + atomic_inc_unchecked(&vcc->stats->rx_err);
36327 goto out;
36328 }
36329
36330 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36331 if (printk_ratelimit())
36332 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36333 __func__, length);
36334 - atomic_inc(&vcc->stats->rx_drop);
36335 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36336 goto out;
36337 }
36338
36339 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36340
36341 vcc->push(vcc, skb);
36342
36343 - atomic_inc(&vcc->stats->rx);
36344 + atomic_inc_unchecked(&vcc->stats->rx);
36345 out:
36346 skb_trim(sarb, 0);
36347 }
36348 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
36349 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36350
36351 usbatm_pop(vcc, skb);
36352 - atomic_inc(&vcc->stats->tx);
36353 + atomic_inc_unchecked(&vcc->stats->tx);
36354
36355 skb = skb_dequeue(&instance->sndqueue);
36356 }
36357 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
36358 if (!left--)
36359 return sprintf(page,
36360 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36361 - atomic_read(&atm_dev->stats.aal5.tx),
36362 - atomic_read(&atm_dev->stats.aal5.tx_err),
36363 - atomic_read(&atm_dev->stats.aal5.rx),
36364 - atomic_read(&atm_dev->stats.aal5.rx_err),
36365 - atomic_read(&atm_dev->stats.aal5.rx_drop));
36366 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36367 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36368 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36369 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36370 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36371
36372 if (!left--) {
36373 if (instance->disconnected)
36374 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
36375 index d956965..4179a77 100644
36376 --- a/drivers/usb/core/devices.c
36377 +++ b/drivers/usb/core/devices.c
36378 @@ -126,7 +126,7 @@ static const char format_endpt[] =
36379 * time it gets called.
36380 */
36381 static struct device_connect_event {
36382 - atomic_t count;
36383 + atomic_unchecked_t count;
36384 wait_queue_head_t wait;
36385 } device_event = {
36386 .count = ATOMIC_INIT(1),
36387 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
36388
36389 void usbfs_conn_disc_event(void)
36390 {
36391 - atomic_add(2, &device_event.count);
36392 + atomic_add_unchecked(2, &device_event.count);
36393 wake_up(&device_event.wait);
36394 }
36395
36396 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
36397
36398 poll_wait(file, &device_event.wait, wait);
36399
36400 - event_count = atomic_read(&device_event.count);
36401 + event_count = atomic_read_unchecked(&device_event.count);
36402 if (file->f_version != event_count) {
36403 file->f_version = event_count;
36404 return POLLIN | POLLRDNORM;
36405 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
36406 index b3bdfed..a9460e0 100644
36407 --- a/drivers/usb/core/message.c
36408 +++ b/drivers/usb/core/message.c
36409 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
36410 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36411 if (buf) {
36412 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36413 - if (len > 0) {
36414 - smallbuf = kmalloc(++len, GFP_NOIO);
36415 + if (len++ > 0) {
36416 + smallbuf = kmalloc(len, GFP_NOIO);
36417 if (!smallbuf)
36418 return buf;
36419 memcpy(smallbuf, buf, len);
36420 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
36421 index 1fc8f12..20647c1 100644
36422 --- a/drivers/usb/early/ehci-dbgp.c
36423 +++ b/drivers/usb/early/ehci-dbgp.c
36424 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
36425
36426 #ifdef CONFIG_KGDB
36427 static struct kgdb_io kgdbdbgp_io_ops;
36428 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36429 +static struct kgdb_io kgdbdbgp_io_ops_console;
36430 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36431 #else
36432 #define dbgp_kgdb_mode (0)
36433 #endif
36434 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
36435 .write_char = kgdbdbgp_write_char,
36436 };
36437
36438 +static struct kgdb_io kgdbdbgp_io_ops_console = {
36439 + .name = "kgdbdbgp",
36440 + .read_char = kgdbdbgp_read_char,
36441 + .write_char = kgdbdbgp_write_char,
36442 + .is_console = 1
36443 +};
36444 +
36445 static int kgdbdbgp_wait_time;
36446
36447 static int __init kgdbdbgp_parse_config(char *str)
36448 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
36449 ptr++;
36450 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36451 }
36452 - kgdb_register_io_module(&kgdbdbgp_io_ops);
36453 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36454 + if (early_dbgp_console.index != -1)
36455 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36456 + else
36457 + kgdb_register_io_module(&kgdbdbgp_io_ops);
36458
36459 return 0;
36460 }
36461 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
36462 index d6bea3e..60b250e 100644
36463 --- a/drivers/usb/wusbcore/wa-hc.h
36464 +++ b/drivers/usb/wusbcore/wa-hc.h
36465 @@ -192,7 +192,7 @@ struct wahc {
36466 struct list_head xfer_delayed_list;
36467 spinlock_t xfer_list_lock;
36468 struct work_struct xfer_work;
36469 - atomic_t xfer_id_count;
36470 + atomic_unchecked_t xfer_id_count;
36471 };
36472
36473
36474 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
36475 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36476 spin_lock_init(&wa->xfer_list_lock);
36477 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36478 - atomic_set(&wa->xfer_id_count, 1);
36479 + atomic_set_unchecked(&wa->xfer_id_count, 1);
36480 }
36481
36482 /**
36483 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
36484 index 57c01ab..8a05959 100644
36485 --- a/drivers/usb/wusbcore/wa-xfer.c
36486 +++ b/drivers/usb/wusbcore/wa-xfer.c
36487 @@ -296,7 +296,7 @@ out:
36488 */
36489 static void wa_xfer_id_init(struct wa_xfer *xfer)
36490 {
36491 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36492 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36493 }
36494
36495 /*
36496 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
36497 index c14c42b..f955cc2 100644
36498 --- a/drivers/vhost/vhost.c
36499 +++ b/drivers/vhost/vhost.c
36500 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
36501 return 0;
36502 }
36503
36504 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36505 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36506 {
36507 struct file *eventfp, *filep = NULL,
36508 *pollstart = NULL, *pollstop = NULL;
36509 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
36510 index b0b2ac3..89a4399 100644
36511 --- a/drivers/video/aty/aty128fb.c
36512 +++ b/drivers/video/aty/aty128fb.c
36513 @@ -148,7 +148,7 @@ enum {
36514 };
36515
36516 /* Must match above enum */
36517 -static const char *r128_family[] __devinitdata = {
36518 +static const char *r128_family[] __devinitconst = {
36519 "AGP",
36520 "PCI",
36521 "PRO AGP",
36522 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
36523 index 5c3960d..15cf8fc 100644
36524 --- a/drivers/video/fbcmap.c
36525 +++ b/drivers/video/fbcmap.c
36526 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
36527 rc = -ENODEV;
36528 goto out;
36529 }
36530 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36531 - !info->fbops->fb_setcmap)) {
36532 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36533 rc = -EINVAL;
36534 goto out1;
36535 }
36536 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
36537 index ad93629..e020fc3 100644
36538 --- a/drivers/video/fbmem.c
36539 +++ b/drivers/video/fbmem.c
36540 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36541 image->dx += image->width + 8;
36542 }
36543 } else if (rotate == FB_ROTATE_UD) {
36544 - for (x = 0; x < num && image->dx >= 0; x++) {
36545 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36546 info->fbops->fb_imageblit(info, image);
36547 image->dx -= image->width + 8;
36548 }
36549 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36550 image->dy += image->height + 8;
36551 }
36552 } else if (rotate == FB_ROTATE_CCW) {
36553 - for (x = 0; x < num && image->dy >= 0; x++) {
36554 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36555 info->fbops->fb_imageblit(info, image);
36556 image->dy -= image->height + 8;
36557 }
36558 @@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
36559 return -EFAULT;
36560 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36561 return -EINVAL;
36562 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36563 + if (con2fb.framebuffer >= FB_MAX)
36564 return -EINVAL;
36565 if (!registered_fb[con2fb.framebuffer])
36566 request_module("fb%d", con2fb.framebuffer);
36567 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
36568 index 5a5d092..265c5ed 100644
36569 --- a/drivers/video/geode/gx1fb_core.c
36570 +++ b/drivers/video/geode/gx1fb_core.c
36571 @@ -29,7 +29,7 @@ static int crt_option = 1;
36572 static char panel_option[32] = "";
36573
36574 /* Modes relevant to the GX1 (taken from modedb.c) */
36575 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
36576 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
36577 /* 640x480-60 VESA */
36578 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36579 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36580 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
36581 index 0fad23f..0e9afa4 100644
36582 --- a/drivers/video/gxt4500.c
36583 +++ b/drivers/video/gxt4500.c
36584 @@ -156,7 +156,7 @@ struct gxt4500_par {
36585 static char *mode_option;
36586
36587 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36588 -static const struct fb_videomode defaultmode __devinitdata = {
36589 +static const struct fb_videomode defaultmode __devinitconst = {
36590 .refresh = 60,
36591 .xres = 1280,
36592 .yres = 1024,
36593 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
36594 return 0;
36595 }
36596
36597 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36598 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36599 .id = "IBM GXT4500P",
36600 .type = FB_TYPE_PACKED_PIXELS,
36601 .visual = FB_VISUAL_PSEUDOCOLOR,
36602 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
36603 index 7672d2e..b56437f 100644
36604 --- a/drivers/video/i810/i810_accel.c
36605 +++ b/drivers/video/i810/i810_accel.c
36606 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
36607 }
36608 }
36609 printk("ringbuffer lockup!!!\n");
36610 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36611 i810_report_error(mmio);
36612 par->dev_flags |= LOCKUP;
36613 info->pixmap.scan_align = 1;
36614 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
36615 index 318f6fb..9a389c1 100644
36616 --- a/drivers/video/i810/i810_main.c
36617 +++ b/drivers/video/i810/i810_main.c
36618 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
36619 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36620
36621 /* PCI */
36622 -static const char *i810_pci_list[] __devinitdata = {
36623 +static const char *i810_pci_list[] __devinitconst = {
36624 "Intel(R) 810 Framebuffer Device" ,
36625 "Intel(R) 810-DC100 Framebuffer Device" ,
36626 "Intel(R) 810E Framebuffer Device" ,
36627 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
36628 index de36693..3c63fc2 100644
36629 --- a/drivers/video/jz4740_fb.c
36630 +++ b/drivers/video/jz4740_fb.c
36631 @@ -136,7 +136,7 @@ struct jzfb {
36632 uint32_t pseudo_palette[16];
36633 };
36634
36635 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36636 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36637 .id = "JZ4740 FB",
36638 .type = FB_TYPE_PACKED_PIXELS,
36639 .visual = FB_VISUAL_TRUECOLOR,
36640 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
36641 index 3c14e43..eafa544 100644
36642 --- a/drivers/video/logo/logo_linux_clut224.ppm
36643 +++ b/drivers/video/logo/logo_linux_clut224.ppm
36644 @@ -1,1604 +1,1123 @@
36645 P3
36646 -# Standard 224-color Linux logo
36647 80 80
36648 255
36649 - 0 0 0 0 0 0 0 0 0 0 0 0
36650 - 0 0 0 0 0 0 0 0 0 0 0 0
36651 - 0 0 0 0 0 0 0 0 0 0 0 0
36652 - 0 0 0 0 0 0 0 0 0 0 0 0
36653 - 0 0 0 0 0 0 0 0 0 0 0 0
36654 - 0 0 0 0 0 0 0 0 0 0 0 0
36655 - 0 0 0 0 0 0 0 0 0 0 0 0
36656 - 0 0 0 0 0 0 0 0 0 0 0 0
36657 - 0 0 0 0 0 0 0 0 0 0 0 0
36658 - 6 6 6 6 6 6 10 10 10 10 10 10
36659 - 10 10 10 6 6 6 6 6 6 6 6 6
36660 - 0 0 0 0 0 0 0 0 0 0 0 0
36661 - 0 0 0 0 0 0 0 0 0 0 0 0
36662 - 0 0 0 0 0 0 0 0 0 0 0 0
36663 - 0 0 0 0 0 0 0 0 0 0 0 0
36664 - 0 0 0 0 0 0 0 0 0 0 0 0
36665 - 0 0 0 0 0 0 0 0 0 0 0 0
36666 - 0 0 0 0 0 0 0 0 0 0 0 0
36667 - 0 0 0 0 0 0 0 0 0 0 0 0
36668 - 0 0 0 0 0 0 0 0 0 0 0 0
36669 - 0 0 0 0 0 0 0 0 0 0 0 0
36670 - 0 0 0 0 0 0 0 0 0 0 0 0
36671 - 0 0 0 0 0 0 0 0 0 0 0 0
36672 - 0 0 0 0 0 0 0 0 0 0 0 0
36673 - 0 0 0 0 0 0 0 0 0 0 0 0
36674 - 0 0 0 0 0 0 0 0 0 0 0 0
36675 - 0 0 0 0 0 0 0 0 0 0 0 0
36676 - 0 0 0 0 0 0 0 0 0 0 0 0
36677 - 0 0 0 6 6 6 10 10 10 14 14 14
36678 - 22 22 22 26 26 26 30 30 30 34 34 34
36679 - 30 30 30 30 30 30 26 26 26 18 18 18
36680 - 14 14 14 10 10 10 6 6 6 0 0 0
36681 - 0 0 0 0 0 0 0 0 0 0 0 0
36682 - 0 0 0 0 0 0 0 0 0 0 0 0
36683 - 0 0 0 0 0 0 0 0 0 0 0 0
36684 - 0 0 0 0 0 0 0 0 0 0 0 0
36685 - 0 0 0 0 0 0 0 0 0 0 0 0
36686 - 0 0 0 0 0 0 0 0 0 0 0 0
36687 - 0 0 0 0 0 0 0 0 0 0 0 0
36688 - 0 0 0 0 0 0 0 0 0 0 0 0
36689 - 0 0 0 0 0 0 0 0 0 0 0 0
36690 - 0 0 0 0 0 1 0 0 1 0 0 0
36691 - 0 0 0 0 0 0 0 0 0 0 0 0
36692 - 0 0 0 0 0 0 0 0 0 0 0 0
36693 - 0 0 0 0 0 0 0 0 0 0 0 0
36694 - 0 0 0 0 0 0 0 0 0 0 0 0
36695 - 0 0 0 0 0 0 0 0 0 0 0 0
36696 - 0 0 0 0 0 0 0 0 0 0 0 0
36697 - 6 6 6 14 14 14 26 26 26 42 42 42
36698 - 54 54 54 66 66 66 78 78 78 78 78 78
36699 - 78 78 78 74 74 74 66 66 66 54 54 54
36700 - 42 42 42 26 26 26 18 18 18 10 10 10
36701 - 6 6 6 0 0 0 0 0 0 0 0 0
36702 - 0 0 0 0 0 0 0 0 0 0 0 0
36703 - 0 0 0 0 0 0 0 0 0 0 0 0
36704 - 0 0 0 0 0 0 0 0 0 0 0 0
36705 - 0 0 0 0 0 0 0 0 0 0 0 0
36706 - 0 0 0 0 0 0 0 0 0 0 0 0
36707 - 0 0 0 0 0 0 0 0 0 0 0 0
36708 - 0 0 0 0 0 0 0 0 0 0 0 0
36709 - 0 0 0 0 0 0 0 0 0 0 0 0
36710 - 0 0 1 0 0 0 0 0 0 0 0 0
36711 - 0 0 0 0 0 0 0 0 0 0 0 0
36712 - 0 0 0 0 0 0 0 0 0 0 0 0
36713 - 0 0 0 0 0 0 0 0 0 0 0 0
36714 - 0 0 0 0 0 0 0 0 0 0 0 0
36715 - 0 0 0 0 0 0 0 0 0 0 0 0
36716 - 0 0 0 0 0 0 0 0 0 10 10 10
36717 - 22 22 22 42 42 42 66 66 66 86 86 86
36718 - 66 66 66 38 38 38 38 38 38 22 22 22
36719 - 26 26 26 34 34 34 54 54 54 66 66 66
36720 - 86 86 86 70 70 70 46 46 46 26 26 26
36721 - 14 14 14 6 6 6 0 0 0 0 0 0
36722 - 0 0 0 0 0 0 0 0 0 0 0 0
36723 - 0 0 0 0 0 0 0 0 0 0 0 0
36724 - 0 0 0 0 0 0 0 0 0 0 0 0
36725 - 0 0 0 0 0 0 0 0 0 0 0 0
36726 - 0 0 0 0 0 0 0 0 0 0 0 0
36727 - 0 0 0 0 0 0 0 0 0 0 0 0
36728 - 0 0 0 0 0 0 0 0 0 0 0 0
36729 - 0 0 0 0 0 0 0 0 0 0 0 0
36730 - 0 0 1 0 0 1 0 0 1 0 0 0
36731 - 0 0 0 0 0 0 0 0 0 0 0 0
36732 - 0 0 0 0 0 0 0 0 0 0 0 0
36733 - 0 0 0 0 0 0 0 0 0 0 0 0
36734 - 0 0 0 0 0 0 0 0 0 0 0 0
36735 - 0 0 0 0 0 0 0 0 0 0 0 0
36736 - 0 0 0 0 0 0 10 10 10 26 26 26
36737 - 50 50 50 82 82 82 58 58 58 6 6 6
36738 - 2 2 6 2 2 6 2 2 6 2 2 6
36739 - 2 2 6 2 2 6 2 2 6 2 2 6
36740 - 6 6 6 54 54 54 86 86 86 66 66 66
36741 - 38 38 38 18 18 18 6 6 6 0 0 0
36742 - 0 0 0 0 0 0 0 0 0 0 0 0
36743 - 0 0 0 0 0 0 0 0 0 0 0 0
36744 - 0 0 0 0 0 0 0 0 0 0 0 0
36745 - 0 0 0 0 0 0 0 0 0 0 0 0
36746 - 0 0 0 0 0 0 0 0 0 0 0 0
36747 - 0 0 0 0 0 0 0 0 0 0 0 0
36748 - 0 0 0 0 0 0 0 0 0 0 0 0
36749 - 0 0 0 0 0 0 0 0 0 0 0 0
36750 - 0 0 0 0 0 0 0 0 0 0 0 0
36751 - 0 0 0 0 0 0 0 0 0 0 0 0
36752 - 0 0 0 0 0 0 0 0 0 0 0 0
36753 - 0 0 0 0 0 0 0 0 0 0 0 0
36754 - 0 0 0 0 0 0 0 0 0 0 0 0
36755 - 0 0 0 0 0 0 0 0 0 0 0 0
36756 - 0 0 0 6 6 6 22 22 22 50 50 50
36757 - 78 78 78 34 34 34 2 2 6 2 2 6
36758 - 2 2 6 2 2 6 2 2 6 2 2 6
36759 - 2 2 6 2 2 6 2 2 6 2 2 6
36760 - 2 2 6 2 2 6 6 6 6 70 70 70
36761 - 78 78 78 46 46 46 22 22 22 6 6 6
36762 - 0 0 0 0 0 0 0 0 0 0 0 0
36763 - 0 0 0 0 0 0 0 0 0 0 0 0
36764 - 0 0 0 0 0 0 0 0 0 0 0 0
36765 - 0 0 0 0 0 0 0 0 0 0 0 0
36766 - 0 0 0 0 0 0 0 0 0 0 0 0
36767 - 0 0 0 0 0 0 0 0 0 0 0 0
36768 - 0 0 0 0 0 0 0 0 0 0 0 0
36769 - 0 0 0 0 0 0 0 0 0 0 0 0
36770 - 0 0 1 0 0 1 0 0 1 0 0 0
36771 - 0 0 0 0 0 0 0 0 0 0 0 0
36772 - 0 0 0 0 0 0 0 0 0 0 0 0
36773 - 0 0 0 0 0 0 0 0 0 0 0 0
36774 - 0 0 0 0 0 0 0 0 0 0 0 0
36775 - 0 0 0 0 0 0 0 0 0 0 0 0
36776 - 6 6 6 18 18 18 42 42 42 82 82 82
36777 - 26 26 26 2 2 6 2 2 6 2 2 6
36778 - 2 2 6 2 2 6 2 2 6 2 2 6
36779 - 2 2 6 2 2 6 2 2 6 14 14 14
36780 - 46 46 46 34 34 34 6 6 6 2 2 6
36781 - 42 42 42 78 78 78 42 42 42 18 18 18
36782 - 6 6 6 0 0 0 0 0 0 0 0 0
36783 - 0 0 0 0 0 0 0 0 0 0 0 0
36784 - 0 0 0 0 0 0 0 0 0 0 0 0
36785 - 0 0 0 0 0 0 0 0 0 0 0 0
36786 - 0 0 0 0 0 0 0 0 0 0 0 0
36787 - 0 0 0 0 0 0 0 0 0 0 0 0
36788 - 0 0 0 0 0 0 0 0 0 0 0 0
36789 - 0 0 0 0 0 0 0 0 0 0 0 0
36790 - 0 0 1 0 0 0 0 0 1 0 0 0
36791 - 0 0 0 0 0 0 0 0 0 0 0 0
36792 - 0 0 0 0 0 0 0 0 0 0 0 0
36793 - 0 0 0 0 0 0 0 0 0 0 0 0
36794 - 0 0 0 0 0 0 0 0 0 0 0 0
36795 - 0 0 0 0 0 0 0 0 0 0 0 0
36796 - 10 10 10 30 30 30 66 66 66 58 58 58
36797 - 2 2 6 2 2 6 2 2 6 2 2 6
36798 - 2 2 6 2 2 6 2 2 6 2 2 6
36799 - 2 2 6 2 2 6 2 2 6 26 26 26
36800 - 86 86 86 101 101 101 46 46 46 10 10 10
36801 - 2 2 6 58 58 58 70 70 70 34 34 34
36802 - 10 10 10 0 0 0 0 0 0 0 0 0
36803 - 0 0 0 0 0 0 0 0 0 0 0 0
36804 - 0 0 0 0 0 0 0 0 0 0 0 0
36805 - 0 0 0 0 0 0 0 0 0 0 0 0
36806 - 0 0 0 0 0 0 0 0 0 0 0 0
36807 - 0 0 0 0 0 0 0 0 0 0 0 0
36808 - 0 0 0 0 0 0 0 0 0 0 0 0
36809 - 0 0 0 0 0 0 0 0 0 0 0 0
36810 - 0 0 1 0 0 1 0 0 1 0 0 0
36811 - 0 0 0 0 0 0 0 0 0 0 0 0
36812 - 0 0 0 0 0 0 0 0 0 0 0 0
36813 - 0 0 0 0 0 0 0 0 0 0 0 0
36814 - 0 0 0 0 0 0 0 0 0 0 0 0
36815 - 0 0 0 0 0 0 0 0 0 0 0 0
36816 - 14 14 14 42 42 42 86 86 86 10 10 10
36817 - 2 2 6 2 2 6 2 2 6 2 2 6
36818 - 2 2 6 2 2 6 2 2 6 2 2 6
36819 - 2 2 6 2 2 6 2 2 6 30 30 30
36820 - 94 94 94 94 94 94 58 58 58 26 26 26
36821 - 2 2 6 6 6 6 78 78 78 54 54 54
36822 - 22 22 22 6 6 6 0 0 0 0 0 0
36823 - 0 0 0 0 0 0 0 0 0 0 0 0
36824 - 0 0 0 0 0 0 0 0 0 0 0 0
36825 - 0 0 0 0 0 0 0 0 0 0 0 0
36826 - 0 0 0 0 0 0 0 0 0 0 0 0
36827 - 0 0 0 0 0 0 0 0 0 0 0 0
36828 - 0 0 0 0 0 0 0 0 0 0 0 0
36829 - 0 0 0 0 0 0 0 0 0 0 0 0
36830 - 0 0 0 0 0 0 0 0 0 0 0 0
36831 - 0 0 0 0 0 0 0 0 0 0 0 0
36832 - 0 0 0 0 0 0 0 0 0 0 0 0
36833 - 0 0 0 0 0 0 0 0 0 0 0 0
36834 - 0 0 0 0 0 0 0 0 0 0 0 0
36835 - 0 0 0 0 0 0 0 0 0 6 6 6
36836 - 22 22 22 62 62 62 62 62 62 2 2 6
36837 - 2 2 6 2 2 6 2 2 6 2 2 6
36838 - 2 2 6 2 2 6 2 2 6 2 2 6
36839 - 2 2 6 2 2 6 2 2 6 26 26 26
36840 - 54 54 54 38 38 38 18 18 18 10 10 10
36841 - 2 2 6 2 2 6 34 34 34 82 82 82
36842 - 38 38 38 14 14 14 0 0 0 0 0 0
36843 - 0 0 0 0 0 0 0 0 0 0 0 0
36844 - 0 0 0 0 0 0 0 0 0 0 0 0
36845 - 0 0 0 0 0 0 0 0 0 0 0 0
36846 - 0 0 0 0 0 0 0 0 0 0 0 0
36847 - 0 0 0 0 0 0 0 0 0 0 0 0
36848 - 0 0 0 0 0 0 0 0 0 0 0 0
36849 - 0 0 0 0 0 0 0 0 0 0 0 0
36850 - 0 0 0 0 0 1 0 0 1 0 0 0
36851 - 0 0 0 0 0 0 0 0 0 0 0 0
36852 - 0 0 0 0 0 0 0 0 0 0 0 0
36853 - 0 0 0 0 0 0 0 0 0 0 0 0
36854 - 0 0 0 0 0 0 0 0 0 0 0 0
36855 - 0 0 0 0 0 0 0 0 0 6 6 6
36856 - 30 30 30 78 78 78 30 30 30 2 2 6
36857 - 2 2 6 2 2 6 2 2 6 2 2 6
36858 - 2 2 6 2 2 6 2 2 6 2 2 6
36859 - 2 2 6 2 2 6 2 2 6 10 10 10
36860 - 10 10 10 2 2 6 2 2 6 2 2 6
36861 - 2 2 6 2 2 6 2 2 6 78 78 78
36862 - 50 50 50 18 18 18 6 6 6 0 0 0
36863 - 0 0 0 0 0 0 0 0 0 0 0 0
36864 - 0 0 0 0 0 0 0 0 0 0 0 0
36865 - 0 0 0 0 0 0 0 0 0 0 0 0
36866 - 0 0 0 0 0 0 0 0 0 0 0 0
36867 - 0 0 0 0 0 0 0 0 0 0 0 0
36868 - 0 0 0 0 0 0 0 0 0 0 0 0
36869 - 0 0 0 0 0 0 0 0 0 0 0 0
36870 - 0 0 1 0 0 0 0 0 0 0 0 0
36871 - 0 0 0 0 0 0 0 0 0 0 0 0
36872 - 0 0 0 0 0 0 0 0 0 0 0 0
36873 - 0 0 0 0 0 0 0 0 0 0 0 0
36874 - 0 0 0 0 0 0 0 0 0 0 0 0
36875 - 0 0 0 0 0 0 0 0 0 10 10 10
36876 - 38 38 38 86 86 86 14 14 14 2 2 6
36877 - 2 2 6 2 2 6 2 2 6 2 2 6
36878 - 2 2 6 2 2 6 2 2 6 2 2 6
36879 - 2 2 6 2 2 6 2 2 6 2 2 6
36880 - 2 2 6 2 2 6 2 2 6 2 2 6
36881 - 2 2 6 2 2 6 2 2 6 54 54 54
36882 - 66 66 66 26 26 26 6 6 6 0 0 0
36883 - 0 0 0 0 0 0 0 0 0 0 0 0
36884 - 0 0 0 0 0 0 0 0 0 0 0 0
36885 - 0 0 0 0 0 0 0 0 0 0 0 0
36886 - 0 0 0 0 0 0 0 0 0 0 0 0
36887 - 0 0 0 0 0 0 0 0 0 0 0 0
36888 - 0 0 0 0 0 0 0 0 0 0 0 0
36889 - 0 0 0 0 0 0 0 0 0 0 0 0
36890 - 0 0 0 0 0 1 0 0 1 0 0 0
36891 - 0 0 0 0 0 0 0 0 0 0 0 0
36892 - 0 0 0 0 0 0 0 0 0 0 0 0
36893 - 0 0 0 0 0 0 0 0 0 0 0 0
36894 - 0 0 0 0 0 0 0 0 0 0 0 0
36895 - 0 0 0 0 0 0 0 0 0 14 14 14
36896 - 42 42 42 82 82 82 2 2 6 2 2 6
36897 - 2 2 6 6 6 6 10 10 10 2 2 6
36898 - 2 2 6 2 2 6 2 2 6 2 2 6
36899 - 2 2 6 2 2 6 2 2 6 6 6 6
36900 - 14 14 14 10 10 10 2 2 6 2 2 6
36901 - 2 2 6 2 2 6 2 2 6 18 18 18
36902 - 82 82 82 34 34 34 10 10 10 0 0 0
36903 - 0 0 0 0 0 0 0 0 0 0 0 0
36904 - 0 0 0 0 0 0 0 0 0 0 0 0
36905 - 0 0 0 0 0 0 0 0 0 0 0 0
36906 - 0 0 0 0 0 0 0 0 0 0 0 0
36907 - 0 0 0 0 0 0 0 0 0 0 0 0
36908 - 0 0 0 0 0 0 0 0 0 0 0 0
36909 - 0 0 0 0 0 0 0 0 0 0 0 0
36910 - 0 0 1 0 0 0 0 0 0 0 0 0
36911 - 0 0 0 0 0 0 0 0 0 0 0 0
36912 - 0 0 0 0 0 0 0 0 0 0 0 0
36913 - 0 0 0 0 0 0 0 0 0 0 0 0
36914 - 0 0 0 0 0 0 0 0 0 0 0 0
36915 - 0 0 0 0 0 0 0 0 0 14 14 14
36916 - 46 46 46 86 86 86 2 2 6 2 2 6
36917 - 6 6 6 6 6 6 22 22 22 34 34 34
36918 - 6 6 6 2 2 6 2 2 6 2 2 6
36919 - 2 2 6 2 2 6 18 18 18 34 34 34
36920 - 10 10 10 50 50 50 22 22 22 2 2 6
36921 - 2 2 6 2 2 6 2 2 6 10 10 10
36922 - 86 86 86 42 42 42 14 14 14 0 0 0
36923 - 0 0 0 0 0 0 0 0 0 0 0 0
36924 - 0 0 0 0 0 0 0 0 0 0 0 0
36925 - 0 0 0 0 0 0 0 0 0 0 0 0
36926 - 0 0 0 0 0 0 0 0 0 0 0 0
36927 - 0 0 0 0 0 0 0 0 0 0 0 0
36928 - 0 0 0 0 0 0 0 0 0 0 0 0
36929 - 0 0 0 0 0 0 0 0 0 0 0 0
36930 - 0 0 1 0 0 1 0 0 1 0 0 0
36931 - 0 0 0 0 0 0 0 0 0 0 0 0
36932 - 0 0 0 0 0 0 0 0 0 0 0 0
36933 - 0 0 0 0 0 0 0 0 0 0 0 0
36934 - 0 0 0 0 0 0 0 0 0 0 0 0
36935 - 0 0 0 0 0 0 0 0 0 14 14 14
36936 - 46 46 46 86 86 86 2 2 6 2 2 6
36937 - 38 38 38 116 116 116 94 94 94 22 22 22
36938 - 22 22 22 2 2 6 2 2 6 2 2 6
36939 - 14 14 14 86 86 86 138 138 138 162 162 162
36940 -154 154 154 38 38 38 26 26 26 6 6 6
36941 - 2 2 6 2 2 6 2 2 6 2 2 6
36942 - 86 86 86 46 46 46 14 14 14 0 0 0
36943 - 0 0 0 0 0 0 0 0 0 0 0 0
36944 - 0 0 0 0 0 0 0 0 0 0 0 0
36945 - 0 0 0 0 0 0 0 0 0 0 0 0
36946 - 0 0 0 0 0 0 0 0 0 0 0 0
36947 - 0 0 0 0 0 0 0 0 0 0 0 0
36948 - 0 0 0 0 0 0 0 0 0 0 0 0
36949 - 0 0 0 0 0 0 0 0 0 0 0 0
36950 - 0 0 0 0 0 0 0 0 0 0 0 0
36951 - 0 0 0 0 0 0 0 0 0 0 0 0
36952 - 0 0 0 0 0 0 0 0 0 0 0 0
36953 - 0 0 0 0 0 0 0 0 0 0 0 0
36954 - 0 0 0 0 0 0 0 0 0 0 0 0
36955 - 0 0 0 0 0 0 0 0 0 14 14 14
36956 - 46 46 46 86 86 86 2 2 6 14 14 14
36957 -134 134 134 198 198 198 195 195 195 116 116 116
36958 - 10 10 10 2 2 6 2 2 6 6 6 6
36959 -101 98 89 187 187 187 210 210 210 218 218 218
36960 -214 214 214 134 134 134 14 14 14 6 6 6
36961 - 2 2 6 2 2 6 2 2 6 2 2 6
36962 - 86 86 86 50 50 50 18 18 18 6 6 6
36963 - 0 0 0 0 0 0 0 0 0 0 0 0
36964 - 0 0 0 0 0 0 0 0 0 0 0 0
36965 - 0 0 0 0 0 0 0 0 0 0 0 0
36966 - 0 0 0 0 0 0 0 0 0 0 0 0
36967 - 0 0 0 0 0 0 0 0 0 0 0 0
36968 - 0 0 0 0 0 0 0 0 0 0 0 0
36969 - 0 0 0 0 0 0 0 0 1 0 0 0
36970 - 0 0 1 0 0 1 0 0 1 0 0 0
36971 - 0 0 0 0 0 0 0 0 0 0 0 0
36972 - 0 0 0 0 0 0 0 0 0 0 0 0
36973 - 0 0 0 0 0 0 0 0 0 0 0 0
36974 - 0 0 0 0 0 0 0 0 0 0 0 0
36975 - 0 0 0 0 0 0 0 0 0 14 14 14
36976 - 46 46 46 86 86 86 2 2 6 54 54 54
36977 -218 218 218 195 195 195 226 226 226 246 246 246
36978 - 58 58 58 2 2 6 2 2 6 30 30 30
36979 -210 210 210 253 253 253 174 174 174 123 123 123
36980 -221 221 221 234 234 234 74 74 74 2 2 6
36981 - 2 2 6 2 2 6 2 2 6 2 2 6
36982 - 70 70 70 58 58 58 22 22 22 6 6 6
36983 - 0 0 0 0 0 0 0 0 0 0 0 0
36984 - 0 0 0 0 0 0 0 0 0 0 0 0
36985 - 0 0 0 0 0 0 0 0 0 0 0 0
36986 - 0 0 0 0 0 0 0 0 0 0 0 0
36987 - 0 0 0 0 0 0 0 0 0 0 0 0
36988 - 0 0 0 0 0 0 0 0 0 0 0 0
36989 - 0 0 0 0 0 0 0 0 0 0 0 0
36990 - 0 0 0 0 0 0 0 0 0 0 0 0
36991 - 0 0 0 0 0 0 0 0 0 0 0 0
36992 - 0 0 0 0 0 0 0 0 0 0 0 0
36993 - 0 0 0 0 0 0 0 0 0 0 0 0
36994 - 0 0 0 0 0 0 0 0 0 0 0 0
36995 - 0 0 0 0 0 0 0 0 0 14 14 14
36996 - 46 46 46 82 82 82 2 2 6 106 106 106
36997 -170 170 170 26 26 26 86 86 86 226 226 226
36998 -123 123 123 10 10 10 14 14 14 46 46 46
36999 -231 231 231 190 190 190 6 6 6 70 70 70
37000 - 90 90 90 238 238 238 158 158 158 2 2 6
37001 - 2 2 6 2 2 6 2 2 6 2 2 6
37002 - 70 70 70 58 58 58 22 22 22 6 6 6
37003 - 0 0 0 0 0 0 0 0 0 0 0 0
37004 - 0 0 0 0 0 0 0 0 0 0 0 0
37005 - 0 0 0 0 0 0 0 0 0 0 0 0
37006 - 0 0 0 0 0 0 0 0 0 0 0 0
37007 - 0 0 0 0 0 0 0 0 0 0 0 0
37008 - 0 0 0 0 0 0 0 0 0 0 0 0
37009 - 0 0 0 0 0 0 0 0 1 0 0 0
37010 - 0 0 1 0 0 1 0 0 1 0 0 0
37011 - 0 0 0 0 0 0 0 0 0 0 0 0
37012 - 0 0 0 0 0 0 0 0 0 0 0 0
37013 - 0 0 0 0 0 0 0 0 0 0 0 0
37014 - 0 0 0 0 0 0 0 0 0 0 0 0
37015 - 0 0 0 0 0 0 0 0 0 14 14 14
37016 - 42 42 42 86 86 86 6 6 6 116 116 116
37017 -106 106 106 6 6 6 70 70 70 149 149 149
37018 -128 128 128 18 18 18 38 38 38 54 54 54
37019 -221 221 221 106 106 106 2 2 6 14 14 14
37020 - 46 46 46 190 190 190 198 198 198 2 2 6
37021 - 2 2 6 2 2 6 2 2 6 2 2 6
37022 - 74 74 74 62 62 62 22 22 22 6 6 6
37023 - 0 0 0 0 0 0 0 0 0 0 0 0
37024 - 0 0 0 0 0 0 0 0 0 0 0 0
37025 - 0 0 0 0 0 0 0 0 0 0 0 0
37026 - 0 0 0 0 0 0 0 0 0 0 0 0
37027 - 0 0 0 0 0 0 0 0 0 0 0 0
37028 - 0 0 0 0 0 0 0 0 0 0 0 0
37029 - 0 0 0 0 0 0 0 0 1 0 0 0
37030 - 0 0 1 0 0 0 0 0 1 0 0 0
37031 - 0 0 0 0 0 0 0 0 0 0 0 0
37032 - 0 0 0 0 0 0 0 0 0 0 0 0
37033 - 0 0 0 0 0 0 0 0 0 0 0 0
37034 - 0 0 0 0 0 0 0 0 0 0 0 0
37035 - 0 0 0 0 0 0 0 0 0 14 14 14
37036 - 42 42 42 94 94 94 14 14 14 101 101 101
37037 -128 128 128 2 2 6 18 18 18 116 116 116
37038 -118 98 46 121 92 8 121 92 8 98 78 10
37039 -162 162 162 106 106 106 2 2 6 2 2 6
37040 - 2 2 6 195 195 195 195 195 195 6 6 6
37041 - 2 2 6 2 2 6 2 2 6 2 2 6
37042 - 74 74 74 62 62 62 22 22 22 6 6 6
37043 - 0 0 0 0 0 0 0 0 0 0 0 0
37044 - 0 0 0 0 0 0 0 0 0 0 0 0
37045 - 0 0 0 0 0 0 0 0 0 0 0 0
37046 - 0 0 0 0 0 0 0 0 0 0 0 0
37047 - 0 0 0 0 0 0 0 0 0 0 0 0
37048 - 0 0 0 0 0 0 0 0 0 0 0 0
37049 - 0 0 0 0 0 0 0 0 1 0 0 1
37050 - 0 0 1 0 0 0 0 0 1 0 0 0
37051 - 0 0 0 0 0 0 0 0 0 0 0 0
37052 - 0 0 0 0 0 0 0 0 0 0 0 0
37053 - 0 0 0 0 0 0 0 0 0 0 0 0
37054 - 0 0 0 0 0 0 0 0 0 0 0 0
37055 - 0 0 0 0 0 0 0 0 0 10 10 10
37056 - 38 38 38 90 90 90 14 14 14 58 58 58
37057 -210 210 210 26 26 26 54 38 6 154 114 10
37058 -226 170 11 236 186 11 225 175 15 184 144 12
37059 -215 174 15 175 146 61 37 26 9 2 2 6
37060 - 70 70 70 246 246 246 138 138 138 2 2 6
37061 - 2 2 6 2 2 6 2 2 6 2 2 6
37062 - 70 70 70 66 66 66 26 26 26 6 6 6
37063 - 0 0 0 0 0 0 0 0 0 0 0 0
37064 - 0 0 0 0 0 0 0 0 0 0 0 0
37065 - 0 0 0 0 0 0 0 0 0 0 0 0
37066 - 0 0 0 0 0 0 0 0 0 0 0 0
37067 - 0 0 0 0 0 0 0 0 0 0 0 0
37068 - 0 0 0 0 0 0 0 0 0 0 0 0
37069 - 0 0 0 0 0 0 0 0 0 0 0 0
37070 - 0 0 0 0 0 0 0 0 0 0 0 0
37071 - 0 0 0 0 0 0 0 0 0 0 0 0
37072 - 0 0 0 0 0 0 0 0 0 0 0 0
37073 - 0 0 0 0 0 0 0 0 0 0 0 0
37074 - 0 0 0 0 0 0 0 0 0 0 0 0
37075 - 0 0 0 0 0 0 0 0 0 10 10 10
37076 - 38 38 38 86 86 86 14 14 14 10 10 10
37077 -195 195 195 188 164 115 192 133 9 225 175 15
37078 -239 182 13 234 190 10 232 195 16 232 200 30
37079 -245 207 45 241 208 19 232 195 16 184 144 12
37080 -218 194 134 211 206 186 42 42 42 2 2 6
37081 - 2 2 6 2 2 6 2 2 6 2 2 6
37082 - 50 50 50 74 74 74 30 30 30 6 6 6
37083 - 0 0 0 0 0 0 0 0 0 0 0 0
37084 - 0 0 0 0 0 0 0 0 0 0 0 0
37085 - 0 0 0 0 0 0 0 0 0 0 0 0
37086 - 0 0 0 0 0 0 0 0 0 0 0 0
37087 - 0 0 0 0 0 0 0 0 0 0 0 0
37088 - 0 0 0 0 0 0 0 0 0 0 0 0
37089 - 0 0 0 0 0 0 0 0 0 0 0 0
37090 - 0 0 0 0 0 0 0 0 0 0 0 0
37091 - 0 0 0 0 0 0 0 0 0 0 0 0
37092 - 0 0 0 0 0 0 0 0 0 0 0 0
37093 - 0 0 0 0 0 0 0 0 0 0 0 0
37094 - 0 0 0 0 0 0 0 0 0 0 0 0
37095 - 0 0 0 0 0 0 0 0 0 10 10 10
37096 - 34 34 34 86 86 86 14 14 14 2 2 6
37097 -121 87 25 192 133 9 219 162 10 239 182 13
37098 -236 186 11 232 195 16 241 208 19 244 214 54
37099 -246 218 60 246 218 38 246 215 20 241 208 19
37100 -241 208 19 226 184 13 121 87 25 2 2 6
37101 - 2 2 6 2 2 6 2 2 6 2 2 6
37102 - 50 50 50 82 82 82 34 34 34 10 10 10
37103 - 0 0 0 0 0 0 0 0 0 0 0 0
37104 - 0 0 0 0 0 0 0 0 0 0 0 0
37105 - 0 0 0 0 0 0 0 0 0 0 0 0
37106 - 0 0 0 0 0 0 0 0 0 0 0 0
37107 - 0 0 0 0 0 0 0 0 0 0 0 0
37108 - 0 0 0 0 0 0 0 0 0 0 0 0
37109 - 0 0 0 0 0 0 0 0 0 0 0 0
37110 - 0 0 0 0 0 0 0 0 0 0 0 0
37111 - 0 0 0 0 0 0 0 0 0 0 0 0
37112 - 0 0 0 0 0 0 0 0 0 0 0 0
37113 - 0 0 0 0 0 0 0 0 0 0 0 0
37114 - 0 0 0 0 0 0 0 0 0 0 0 0
37115 - 0 0 0 0 0 0 0 0 0 10 10 10
37116 - 34 34 34 82 82 82 30 30 30 61 42 6
37117 -180 123 7 206 145 10 230 174 11 239 182 13
37118 -234 190 10 238 202 15 241 208 19 246 218 74
37119 -246 218 38 246 215 20 246 215 20 246 215 20
37120 -226 184 13 215 174 15 184 144 12 6 6 6
37121 - 2 2 6 2 2 6 2 2 6 2 2 6
37122 - 26 26 26 94 94 94 42 42 42 14 14 14
37123 - 0 0 0 0 0 0 0 0 0 0 0 0
37124 - 0 0 0 0 0 0 0 0 0 0 0 0
37125 - 0 0 0 0 0 0 0 0 0 0 0 0
37126 - 0 0 0 0 0 0 0 0 0 0 0 0
37127 - 0 0 0 0 0 0 0 0 0 0 0 0
37128 - 0 0 0 0 0 0 0 0 0 0 0 0
37129 - 0 0 0 0 0 0 0 0 0 0 0 0
37130 - 0 0 0 0 0 0 0 0 0 0 0 0
37131 - 0 0 0 0 0 0 0 0 0 0 0 0
37132 - 0 0 0 0 0 0 0 0 0 0 0 0
37133 - 0 0 0 0 0 0 0 0 0 0 0 0
37134 - 0 0 0 0 0 0 0 0 0 0 0 0
37135 - 0 0 0 0 0 0 0 0 0 10 10 10
37136 - 30 30 30 78 78 78 50 50 50 104 69 6
37137 -192 133 9 216 158 10 236 178 12 236 186 11
37138 -232 195 16 241 208 19 244 214 54 245 215 43
37139 -246 215 20 246 215 20 241 208 19 198 155 10
37140 -200 144 11 216 158 10 156 118 10 2 2 6
37141 - 2 2 6 2 2 6 2 2 6 2 2 6
37142 - 6 6 6 90 90 90 54 54 54 18 18 18
37143 - 6 6 6 0 0 0 0 0 0 0 0 0
37144 - 0 0 0 0 0 0 0 0 0 0 0 0
37145 - 0 0 0 0 0 0 0 0 0 0 0 0
37146 - 0 0 0 0 0 0 0 0 0 0 0 0
37147 - 0 0 0 0 0 0 0 0 0 0 0 0
37148 - 0 0 0 0 0 0 0 0 0 0 0 0
37149 - 0 0 0 0 0 0 0 0 0 0 0 0
37150 - 0 0 0 0 0 0 0 0 0 0 0 0
37151 - 0 0 0 0 0 0 0 0 0 0 0 0
37152 - 0 0 0 0 0 0 0 0 0 0 0 0
37153 - 0 0 0 0 0 0 0 0 0 0 0 0
37154 - 0 0 0 0 0 0 0 0 0 0 0 0
37155 - 0 0 0 0 0 0 0 0 0 10 10 10
37156 - 30 30 30 78 78 78 46 46 46 22 22 22
37157 -137 92 6 210 162 10 239 182 13 238 190 10
37158 -238 202 15 241 208 19 246 215 20 246 215 20
37159 -241 208 19 203 166 17 185 133 11 210 150 10
37160 -216 158 10 210 150 10 102 78 10 2 2 6
37161 - 6 6 6 54 54 54 14 14 14 2 2 6
37162 - 2 2 6 62 62 62 74 74 74 30 30 30
37163 - 10 10 10 0 0 0 0 0 0 0 0 0
37164 - 0 0 0 0 0 0 0 0 0 0 0 0
37165 - 0 0 0 0 0 0 0 0 0 0 0 0
37166 - 0 0 0 0 0 0 0 0 0 0 0 0
37167 - 0 0 0 0 0 0 0 0 0 0 0 0
37168 - 0 0 0 0 0 0 0 0 0 0 0 0
37169 - 0 0 0 0 0 0 0 0 0 0 0 0
37170 - 0 0 0 0 0 0 0 0 0 0 0 0
37171 - 0 0 0 0 0 0 0 0 0 0 0 0
37172 - 0 0 0 0 0 0 0 0 0 0 0 0
37173 - 0 0 0 0 0 0 0 0 0 0 0 0
37174 - 0 0 0 0 0 0 0 0 0 0 0 0
37175 - 0 0 0 0 0 0 0 0 0 10 10 10
37176 - 34 34 34 78 78 78 50 50 50 6 6 6
37177 - 94 70 30 139 102 15 190 146 13 226 184 13
37178 -232 200 30 232 195 16 215 174 15 190 146 13
37179 -168 122 10 192 133 9 210 150 10 213 154 11
37180 -202 150 34 182 157 106 101 98 89 2 2 6
37181 - 2 2 6 78 78 78 116 116 116 58 58 58
37182 - 2 2 6 22 22 22 90 90 90 46 46 46
37183 - 18 18 18 6 6 6 0 0 0 0 0 0
37184 - 0 0 0 0 0 0 0 0 0 0 0 0
37185 - 0 0 0 0 0 0 0 0 0 0 0 0
37186 - 0 0 0 0 0 0 0 0 0 0 0 0
37187 - 0 0 0 0 0 0 0 0 0 0 0 0
37188 - 0 0 0 0 0 0 0 0 0 0 0 0
37189 - 0 0 0 0 0 0 0 0 0 0 0 0
37190 - 0 0 0 0 0 0 0 0 0 0 0 0
37191 - 0 0 0 0 0 0 0 0 0 0 0 0
37192 - 0 0 0 0 0 0 0 0 0 0 0 0
37193 - 0 0 0 0 0 0 0 0 0 0 0 0
37194 - 0 0 0 0 0 0 0 0 0 0 0 0
37195 - 0 0 0 0 0 0 0 0 0 10 10 10
37196 - 38 38 38 86 86 86 50 50 50 6 6 6
37197 -128 128 128 174 154 114 156 107 11 168 122 10
37198 -198 155 10 184 144 12 197 138 11 200 144 11
37199 -206 145 10 206 145 10 197 138 11 188 164 115
37200 -195 195 195 198 198 198 174 174 174 14 14 14
37201 - 2 2 6 22 22 22 116 116 116 116 116 116
37202 - 22 22 22 2 2 6 74 74 74 70 70 70
37203 - 30 30 30 10 10 10 0 0 0 0 0 0
37204 - 0 0 0 0 0 0 0 0 0 0 0 0
37205 - 0 0 0 0 0 0 0 0 0 0 0 0
37206 - 0 0 0 0 0 0 0 0 0 0 0 0
37207 - 0 0 0 0 0 0 0 0 0 0 0 0
37208 - 0 0 0 0 0 0 0 0 0 0 0 0
37209 - 0 0 0 0 0 0 0 0 0 0 0 0
37210 - 0 0 0 0 0 0 0 0 0 0 0 0
37211 - 0 0 0 0 0 0 0 0 0 0 0 0
37212 - 0 0 0 0 0 0 0 0 0 0 0 0
37213 - 0 0 0 0 0 0 0 0 0 0 0 0
37214 - 0 0 0 0 0 0 0 0 0 0 0 0
37215 - 0 0 0 0 0 0 6 6 6 18 18 18
37216 - 50 50 50 101 101 101 26 26 26 10 10 10
37217 -138 138 138 190 190 190 174 154 114 156 107 11
37218 -197 138 11 200 144 11 197 138 11 192 133 9
37219 -180 123 7 190 142 34 190 178 144 187 187 187
37220 -202 202 202 221 221 221 214 214 214 66 66 66
37221 - 2 2 6 2 2 6 50 50 50 62 62 62
37222 - 6 6 6 2 2 6 10 10 10 90 90 90
37223 - 50 50 50 18 18 18 6 6 6 0 0 0
37224 - 0 0 0 0 0 0 0 0 0 0 0 0
37225 - 0 0 0 0 0 0 0 0 0 0 0 0
37226 - 0 0 0 0 0 0 0 0 0 0 0 0
37227 - 0 0 0 0 0 0 0 0 0 0 0 0
37228 - 0 0 0 0 0 0 0 0 0 0 0 0
37229 - 0 0 0 0 0 0 0 0 0 0 0 0
37230 - 0 0 0 0 0 0 0 0 0 0 0 0
37231 - 0 0 0 0 0 0 0 0 0 0 0 0
37232 - 0 0 0 0 0 0 0 0 0 0 0 0
37233 - 0 0 0 0 0 0 0 0 0 0 0 0
37234 - 0 0 0 0 0 0 0 0 0 0 0 0
37235 - 0 0 0 0 0 0 10 10 10 34 34 34
37236 - 74 74 74 74 74 74 2 2 6 6 6 6
37237 -144 144 144 198 198 198 190 190 190 178 166 146
37238 -154 121 60 156 107 11 156 107 11 168 124 44
37239 -174 154 114 187 187 187 190 190 190 210 210 210
37240 -246 246 246 253 253 253 253 253 253 182 182 182
37241 - 6 6 6 2 2 6 2 2 6 2 2 6
37242 - 2 2 6 2 2 6 2 2 6 62 62 62
37243 - 74 74 74 34 34 34 14 14 14 0 0 0
37244 - 0 0 0 0 0 0 0 0 0 0 0 0
37245 - 0 0 0 0 0 0 0 0 0 0 0 0
37246 - 0 0 0 0 0 0 0 0 0 0 0 0
37247 - 0 0 0 0 0 0 0 0 0 0 0 0
37248 - 0 0 0 0 0 0 0 0 0 0 0 0
37249 - 0 0 0 0 0 0 0 0 0 0 0 0
37250 - 0 0 0 0 0 0 0 0 0 0 0 0
37251 - 0 0 0 0 0 0 0 0 0 0 0 0
37252 - 0 0 0 0 0 0 0 0 0 0 0 0
37253 - 0 0 0 0 0 0 0 0 0 0 0 0
37254 - 0 0 0 0 0 0 0 0 0 0 0 0
37255 - 0 0 0 10 10 10 22 22 22 54 54 54
37256 - 94 94 94 18 18 18 2 2 6 46 46 46
37257 -234 234 234 221 221 221 190 190 190 190 190 190
37258 -190 190 190 187 187 187 187 187 187 190 190 190
37259 -190 190 190 195 195 195 214 214 214 242 242 242
37260 -253 253 253 253 253 253 253 253 253 253 253 253
37261 - 82 82 82 2 2 6 2 2 6 2 2 6
37262 - 2 2 6 2 2 6 2 2 6 14 14 14
37263 - 86 86 86 54 54 54 22 22 22 6 6 6
37264 - 0 0 0 0 0 0 0 0 0 0 0 0
37265 - 0 0 0 0 0 0 0 0 0 0 0 0
37266 - 0 0 0 0 0 0 0 0 0 0 0 0
37267 - 0 0 0 0 0 0 0 0 0 0 0 0
37268 - 0 0 0 0 0 0 0 0 0 0 0 0
37269 - 0 0 0 0 0 0 0 0 0 0 0 0
37270 - 0 0 0 0 0 0 0 0 0 0 0 0
37271 - 0 0 0 0 0 0 0 0 0 0 0 0
37272 - 0 0 0 0 0 0 0 0 0 0 0 0
37273 - 0 0 0 0 0 0 0 0 0 0 0 0
37274 - 0 0 0 0 0 0 0 0 0 0 0 0
37275 - 6 6 6 18 18 18 46 46 46 90 90 90
37276 - 46 46 46 18 18 18 6 6 6 182 182 182
37277 -253 253 253 246 246 246 206 206 206 190 190 190
37278 -190 190 190 190 190 190 190 190 190 190 190 190
37279 -206 206 206 231 231 231 250 250 250 253 253 253
37280 -253 253 253 253 253 253 253 253 253 253 253 253
37281 -202 202 202 14 14 14 2 2 6 2 2 6
37282 - 2 2 6 2 2 6 2 2 6 2 2 6
37283 - 42 42 42 86 86 86 42 42 42 18 18 18
37284 - 6 6 6 0 0 0 0 0 0 0 0 0
37285 - 0 0 0 0 0 0 0 0 0 0 0 0
37286 - 0 0 0 0 0 0 0 0 0 0 0 0
37287 - 0 0 0 0 0 0 0 0 0 0 0 0
37288 - 0 0 0 0 0 0 0 0 0 0 0 0
37289 - 0 0 0 0 0 0 0 0 0 0 0 0
37290 - 0 0 0 0 0 0 0 0 0 0 0 0
37291 - 0 0 0 0 0 0 0 0 0 0 0 0
37292 - 0 0 0 0 0 0 0 0 0 0 0 0
37293 - 0 0 0 0 0 0 0 0 0 0 0 0
37294 - 0 0 0 0 0 0 0 0 0 6 6 6
37295 - 14 14 14 38 38 38 74 74 74 66 66 66
37296 - 2 2 6 6 6 6 90 90 90 250 250 250
37297 -253 253 253 253 253 253 238 238 238 198 198 198
37298 -190 190 190 190 190 190 195 195 195 221 221 221
37299 -246 246 246 253 253 253 253 253 253 253 253 253
37300 -253 253 253 253 253 253 253 253 253 253 253 253
37301 -253 253 253 82 82 82 2 2 6 2 2 6
37302 - 2 2 6 2 2 6 2 2 6 2 2 6
37303 - 2 2 6 78 78 78 70 70 70 34 34 34
37304 - 14 14 14 6 6 6 0 0 0 0 0 0
37305 - 0 0 0 0 0 0 0 0 0 0 0 0
37306 - 0 0 0 0 0 0 0 0 0 0 0 0
37307 - 0 0 0 0 0 0 0 0 0 0 0 0
37308 - 0 0 0 0 0 0 0 0 0 0 0 0
37309 - 0 0 0 0 0 0 0 0 0 0 0 0
37310 - 0 0 0 0 0 0 0 0 0 0 0 0
37311 - 0 0 0 0 0 0 0 0 0 0 0 0
37312 - 0 0 0 0 0 0 0 0 0 0 0 0
37313 - 0 0 0 0 0 0 0 0 0 0 0 0
37314 - 0 0 0 0 0 0 0 0 0 14 14 14
37315 - 34 34 34 66 66 66 78 78 78 6 6 6
37316 - 2 2 6 18 18 18 218 218 218 253 253 253
37317 -253 253 253 253 253 253 253 253 253 246 246 246
37318 -226 226 226 231 231 231 246 246 246 253 253 253
37319 -253 253 253 253 253 253 253 253 253 253 253 253
37320 -253 253 253 253 253 253 253 253 253 253 253 253
37321 -253 253 253 178 178 178 2 2 6 2 2 6
37322 - 2 2 6 2 2 6 2 2 6 2 2 6
37323 - 2 2 6 18 18 18 90 90 90 62 62 62
37324 - 30 30 30 10 10 10 0 0 0 0 0 0
37325 - 0 0 0 0 0 0 0 0 0 0 0 0
37326 - 0 0 0 0 0 0 0 0 0 0 0 0
37327 - 0 0 0 0 0 0 0 0 0 0 0 0
37328 - 0 0 0 0 0 0 0 0 0 0 0 0
37329 - 0 0 0 0 0 0 0 0 0 0 0 0
37330 - 0 0 0 0 0 0 0 0 0 0 0 0
37331 - 0 0 0 0 0 0 0 0 0 0 0 0
37332 - 0 0 0 0 0 0 0 0 0 0 0 0
37333 - 0 0 0 0 0 0 0 0 0 0 0 0
37334 - 0 0 0 0 0 0 10 10 10 26 26 26
37335 - 58 58 58 90 90 90 18 18 18 2 2 6
37336 - 2 2 6 110 110 110 253 253 253 253 253 253
37337 -253 253 253 253 253 253 253 253 253 253 253 253
37338 -250 250 250 253 253 253 253 253 253 253 253 253
37339 -253 253 253 253 253 253 253 253 253 253 253 253
37340 -253 253 253 253 253 253 253 253 253 253 253 253
37341 -253 253 253 231 231 231 18 18 18 2 2 6
37342 - 2 2 6 2 2 6 2 2 6 2 2 6
37343 - 2 2 6 2 2 6 18 18 18 94 94 94
37344 - 54 54 54 26 26 26 10 10 10 0 0 0
37345 - 0 0 0 0 0 0 0 0 0 0 0 0
37346 - 0 0 0 0 0 0 0 0 0 0 0 0
37347 - 0 0 0 0 0 0 0 0 0 0 0 0
37348 - 0 0 0 0 0 0 0 0 0 0 0 0
37349 - 0 0 0 0 0 0 0 0 0 0 0 0
37350 - 0 0 0 0 0 0 0 0 0 0 0 0
37351 - 0 0 0 0 0 0 0 0 0 0 0 0
37352 - 0 0 0 0 0 0 0 0 0 0 0 0
37353 - 0 0 0 0 0 0 0 0 0 0 0 0
37354 - 0 0 0 6 6 6 22 22 22 50 50 50
37355 - 90 90 90 26 26 26 2 2 6 2 2 6
37356 - 14 14 14 195 195 195 250 250 250 253 253 253
37357 -253 253 253 253 253 253 253 253 253 253 253 253
37358 -253 253 253 253 253 253 253 253 253 253 253 253
37359 -253 253 253 253 253 253 253 253 253 253 253 253
37360 -253 253 253 253 253 253 253 253 253 253 253 253
37361 -250 250 250 242 242 242 54 54 54 2 2 6
37362 - 2 2 6 2 2 6 2 2 6 2 2 6
37363 - 2 2 6 2 2 6 2 2 6 38 38 38
37364 - 86 86 86 50 50 50 22 22 22 6 6 6
37365 - 0 0 0 0 0 0 0 0 0 0 0 0
37366 - 0 0 0 0 0 0 0 0 0 0 0 0
37367 - 0 0 0 0 0 0 0 0 0 0 0 0
37368 - 0 0 0 0 0 0 0 0 0 0 0 0
37369 - 0 0 0 0 0 0 0 0 0 0 0 0
37370 - 0 0 0 0 0 0 0 0 0 0 0 0
37371 - 0 0 0 0 0 0 0 0 0 0 0 0
37372 - 0 0 0 0 0 0 0 0 0 0 0 0
37373 - 0 0 0 0 0 0 0 0 0 0 0 0
37374 - 6 6 6 14 14 14 38 38 38 82 82 82
37375 - 34 34 34 2 2 6 2 2 6 2 2 6
37376 - 42 42 42 195 195 195 246 246 246 253 253 253
37377 -253 253 253 253 253 253 253 253 253 250 250 250
37378 -242 242 242 242 242 242 250 250 250 253 253 253
37379 -253 253 253 253 253 253 253 253 253 253 253 253
37380 -253 253 253 250 250 250 246 246 246 238 238 238
37381 -226 226 226 231 231 231 101 101 101 6 6 6
37382 - 2 2 6 2 2 6 2 2 6 2 2 6
37383 - 2 2 6 2 2 6 2 2 6 2 2 6
37384 - 38 38 38 82 82 82 42 42 42 14 14 14
37385 - 6 6 6 0 0 0 0 0 0 0 0 0
37386 - 0 0 0 0 0 0 0 0 0 0 0 0
37387 - 0 0 0 0 0 0 0 0 0 0 0 0
37388 - 0 0 0 0 0 0 0 0 0 0 0 0
37389 - 0 0 0 0 0 0 0 0 0 0 0 0
37390 - 0 0 0 0 0 0 0 0 0 0 0 0
37391 - 0 0 0 0 0 0 0 0 0 0 0 0
37392 - 0 0 0 0 0 0 0 0 0 0 0 0
37393 - 0 0 0 0 0 0 0 0 0 0 0 0
37394 - 10 10 10 26 26 26 62 62 62 66 66 66
37395 - 2 2 6 2 2 6 2 2 6 6 6 6
37396 - 70 70 70 170 170 170 206 206 206 234 234 234
37397 -246 246 246 250 250 250 250 250 250 238 238 238
37398 -226 226 226 231 231 231 238 238 238 250 250 250
37399 -250 250 250 250 250 250 246 246 246 231 231 231
37400 -214 214 214 206 206 206 202 202 202 202 202 202
37401 -198 198 198 202 202 202 182 182 182 18 18 18
37402 - 2 2 6 2 2 6 2 2 6 2 2 6
37403 - 2 2 6 2 2 6 2 2 6 2 2 6
37404 - 2 2 6 62 62 62 66 66 66 30 30 30
37405 - 10 10 10 0 0 0 0 0 0 0 0 0
37406 - 0 0 0 0 0 0 0 0 0 0 0 0
37407 - 0 0 0 0 0 0 0 0 0 0 0 0
37408 - 0 0 0 0 0 0 0 0 0 0 0 0
37409 - 0 0 0 0 0 0 0 0 0 0 0 0
37410 - 0 0 0 0 0 0 0 0 0 0 0 0
37411 - 0 0 0 0 0 0 0 0 0 0 0 0
37412 - 0 0 0 0 0 0 0 0 0 0 0 0
37413 - 0 0 0 0 0 0 0 0 0 0 0 0
37414 - 14 14 14 42 42 42 82 82 82 18 18 18
37415 - 2 2 6 2 2 6 2 2 6 10 10 10
37416 - 94 94 94 182 182 182 218 218 218 242 242 242
37417 -250 250 250 253 253 253 253 253 253 250 250 250
37418 -234 234 234 253 253 253 253 253 253 253 253 253
37419 -253 253 253 253 253 253 253 253 253 246 246 246
37420 -238 238 238 226 226 226 210 210 210 202 202 202
37421 -195 195 195 195 195 195 210 210 210 158 158 158
37422 - 6 6 6 14 14 14 50 50 50 14 14 14
37423 - 2 2 6 2 2 6 2 2 6 2 2 6
37424 - 2 2 6 6 6 6 86 86 86 46 46 46
37425 - 18 18 18 6 6 6 0 0 0 0 0 0
37426 - 0 0 0 0 0 0 0 0 0 0 0 0
37427 - 0 0 0 0 0 0 0 0 0 0 0 0
37428 - 0 0 0 0 0 0 0 0 0 0 0 0
37429 - 0 0 0 0 0 0 0 0 0 0 0 0
37430 - 0 0 0 0 0 0 0 0 0 0 0 0
37431 - 0 0 0 0 0 0 0 0 0 0 0 0
37432 - 0 0 0 0 0 0 0 0 0 0 0 0
37433 - 0 0 0 0 0 0 0 0 0 6 6 6
37434 - 22 22 22 54 54 54 70 70 70 2 2 6
37435 - 2 2 6 10 10 10 2 2 6 22 22 22
37436 -166 166 166 231 231 231 250 250 250 253 253 253
37437 -253 253 253 253 253 253 253 253 253 250 250 250
37438 -242 242 242 253 253 253 253 253 253 253 253 253
37439 -253 253 253 253 253 253 253 253 253 253 253 253
37440 -253 253 253 253 253 253 253 253 253 246 246 246
37441 -231 231 231 206 206 206 198 198 198 226 226 226
37442 - 94 94 94 2 2 6 6 6 6 38 38 38
37443 - 30 30 30 2 2 6 2 2 6 2 2 6
37444 - 2 2 6 2 2 6 62 62 62 66 66 66
37445 - 26 26 26 10 10 10 0 0 0 0 0 0
37446 - 0 0 0 0 0 0 0 0 0 0 0 0
37447 - 0 0 0 0 0 0 0 0 0 0 0 0
37448 - 0 0 0 0 0 0 0 0 0 0 0 0
37449 - 0 0 0 0 0 0 0 0 0 0 0 0
37450 - 0 0 0 0 0 0 0 0 0 0 0 0
37451 - 0 0 0 0 0 0 0 0 0 0 0 0
37452 - 0 0 0 0 0 0 0 0 0 0 0 0
37453 - 0 0 0 0 0 0 0 0 0 10 10 10
37454 - 30 30 30 74 74 74 50 50 50 2 2 6
37455 - 26 26 26 26 26 26 2 2 6 106 106 106
37456 -238 238 238 253 253 253 253 253 253 253 253 253
37457 -253 253 253 253 253 253 253 253 253 253 253 253
37458 -253 253 253 253 253 253 253 253 253 253 253 253
37459 -253 253 253 253 253 253 253 253 253 253 253 253
37460 -253 253 253 253 253 253 253 253 253 253 253 253
37461 -253 253 253 246 246 246 218 218 218 202 202 202
37462 -210 210 210 14 14 14 2 2 6 2 2 6
37463 - 30 30 30 22 22 22 2 2 6 2 2 6
37464 - 2 2 6 2 2 6 18 18 18 86 86 86
37465 - 42 42 42 14 14 14 0 0 0 0 0 0
37466 - 0 0 0 0 0 0 0 0 0 0 0 0
37467 - 0 0 0 0 0 0 0 0 0 0 0 0
37468 - 0 0 0 0 0 0 0 0 0 0 0 0
37469 - 0 0 0 0 0 0 0 0 0 0 0 0
37470 - 0 0 0 0 0 0 0 0 0 0 0 0
37471 - 0 0 0 0 0 0 0 0 0 0 0 0
37472 - 0 0 0 0 0 0 0 0 0 0 0 0
37473 - 0 0 0 0 0 0 0 0 0 14 14 14
37474 - 42 42 42 90 90 90 22 22 22 2 2 6
37475 - 42 42 42 2 2 6 18 18 18 218 218 218
37476 -253 253 253 253 253 253 253 253 253 253 253 253
37477 -253 253 253 253 253 253 253 253 253 253 253 253
37478 -253 253 253 253 253 253 253 253 253 253 253 253
37479 -253 253 253 253 253 253 253 253 253 253 253 253
37480 -253 253 253 253 253 253 253 253 253 253 253 253
37481 -253 253 253 253 253 253 250 250 250 221 221 221
37482 -218 218 218 101 101 101 2 2 6 14 14 14
37483 - 18 18 18 38 38 38 10 10 10 2 2 6
37484 - 2 2 6 2 2 6 2 2 6 78 78 78
37485 - 58 58 58 22 22 22 6 6 6 0 0 0
37486 - 0 0 0 0 0 0 0 0 0 0 0 0
37487 - 0 0 0 0 0 0 0 0 0 0 0 0
37488 - 0 0 0 0 0 0 0 0 0 0 0 0
37489 - 0 0 0 0 0 0 0 0 0 0 0 0
37490 - 0 0 0 0 0 0 0 0 0 0 0 0
37491 - 0 0 0 0 0 0 0 0 0 0 0 0
37492 - 0 0 0 0 0 0 0 0 0 0 0 0
37493 - 0 0 0 0 0 0 6 6 6 18 18 18
37494 - 54 54 54 82 82 82 2 2 6 26 26 26
37495 - 22 22 22 2 2 6 123 123 123 253 253 253
37496 -253 253 253 253 253 253 253 253 253 253 253 253
37497 -253 253 253 253 253 253 253 253 253 253 253 253
37498 -253 253 253 253 253 253 253 253 253 253 253 253
37499 -253 253 253 253 253 253 253 253 253 253 253 253
37500 -253 253 253 253 253 253 253 253 253 253 253 253
37501 -253 253 253 253 253 253 253 253 253 250 250 250
37502 -238 238 238 198 198 198 6 6 6 38 38 38
37503 - 58 58 58 26 26 26 38 38 38 2 2 6
37504 - 2 2 6 2 2 6 2 2 6 46 46 46
37505 - 78 78 78 30 30 30 10 10 10 0 0 0
37506 - 0 0 0 0 0 0 0 0 0 0 0 0
37507 - 0 0 0 0 0 0 0 0 0 0 0 0
37508 - 0 0 0 0 0 0 0 0 0 0 0 0
37509 - 0 0 0 0 0 0 0 0 0 0 0 0
37510 - 0 0 0 0 0 0 0 0 0 0 0 0
37511 - 0 0 0 0 0 0 0 0 0 0 0 0
37512 - 0 0 0 0 0 0 0 0 0 0 0 0
37513 - 0 0 0 0 0 0 10 10 10 30 30 30
37514 - 74 74 74 58 58 58 2 2 6 42 42 42
37515 - 2 2 6 22 22 22 231 231 231 253 253 253
37516 -253 253 253 253 253 253 253 253 253 253 253 253
37517 -253 253 253 253 253 253 253 253 253 250 250 250
37518 -253 253 253 253 253 253 253 253 253 253 253 253
37519 -253 253 253 253 253 253 253 253 253 253 253 253
37520 -253 253 253 253 253 253 253 253 253 253 253 253
37521 -253 253 253 253 253 253 253 253 253 253 253 253
37522 -253 253 253 246 246 246 46 46 46 38 38 38
37523 - 42 42 42 14 14 14 38 38 38 14 14 14
37524 - 2 2 6 2 2 6 2 2 6 6 6 6
37525 - 86 86 86 46 46 46 14 14 14 0 0 0
37526 - 0 0 0 0 0 0 0 0 0 0 0 0
37527 - 0 0 0 0 0 0 0 0 0 0 0 0
37528 - 0 0 0 0 0 0 0 0 0 0 0 0
37529 - 0 0 0 0 0 0 0 0 0 0 0 0
37530 - 0 0 0 0 0 0 0 0 0 0 0 0
37531 - 0 0 0 0 0 0 0 0 0 0 0 0
37532 - 0 0 0 0 0 0 0 0 0 0 0 0
37533 - 0 0 0 6 6 6 14 14 14 42 42 42
37534 - 90 90 90 18 18 18 18 18 18 26 26 26
37535 - 2 2 6 116 116 116 253 253 253 253 253 253
37536 -253 253 253 253 253 253 253 253 253 253 253 253
37537 -253 253 253 253 253 253 250 250 250 238 238 238
37538 -253 253 253 253 253 253 253 253 253 253 253 253
37539 -253 253 253 253 253 253 253 253 253 253 253 253
37540 -253 253 253 253 253 253 253 253 253 253 253 253
37541 -253 253 253 253 253 253 253 253 253 253 253 253
37542 -253 253 253 253 253 253 94 94 94 6 6 6
37543 - 2 2 6 2 2 6 10 10 10 34 34 34
37544 - 2 2 6 2 2 6 2 2 6 2 2 6
37545 - 74 74 74 58 58 58 22 22 22 6 6 6
37546 - 0 0 0 0 0 0 0 0 0 0 0 0
37547 - 0 0 0 0 0 0 0 0 0 0 0 0
37548 - 0 0 0 0 0 0 0 0 0 0 0 0
37549 - 0 0 0 0 0 0 0 0 0 0 0 0
37550 - 0 0 0 0 0 0 0 0 0 0 0 0
37551 - 0 0 0 0 0 0 0 0 0 0 0 0
37552 - 0 0 0 0 0 0 0 0 0 0 0 0
37553 - 0 0 0 10 10 10 26 26 26 66 66 66
37554 - 82 82 82 2 2 6 38 38 38 6 6 6
37555 - 14 14 14 210 210 210 253 253 253 253 253 253
37556 -253 253 253 253 253 253 253 253 253 253 253 253
37557 -253 253 253 253 253 253 246 246 246 242 242 242
37558 -253 253 253 253 253 253 253 253 253 253 253 253
37559 -253 253 253 253 253 253 253 253 253 253 253 253
37560 -253 253 253 253 253 253 253 253 253 253 253 253
37561 -253 253 253 253 253 253 253 253 253 253 253 253
37562 -253 253 253 253 253 253 144 144 144 2 2 6
37563 - 2 2 6 2 2 6 2 2 6 46 46 46
37564 - 2 2 6 2 2 6 2 2 6 2 2 6
37565 - 42 42 42 74 74 74 30 30 30 10 10 10
37566 - 0 0 0 0 0 0 0 0 0 0 0 0
37567 - 0 0 0 0 0 0 0 0 0 0 0 0
37568 - 0 0 0 0 0 0 0 0 0 0 0 0
37569 - 0 0 0 0 0 0 0 0 0 0 0 0
37570 - 0 0 0 0 0 0 0 0 0 0 0 0
37571 - 0 0 0 0 0 0 0 0 0 0 0 0
37572 - 0 0 0 0 0 0 0 0 0 0 0 0
37573 - 6 6 6 14 14 14 42 42 42 90 90 90
37574 - 26 26 26 6 6 6 42 42 42 2 2 6
37575 - 74 74 74 250 250 250 253 253 253 253 253 253
37576 -253 253 253 253 253 253 253 253 253 253 253 253
37577 -253 253 253 253 253 253 242 242 242 242 242 242
37578 -253 253 253 253 253 253 253 253 253 253 253 253
37579 -253 253 253 253 253 253 253 253 253 253 253 253
37580 -253 253 253 253 253 253 253 253 253 253 253 253
37581 -253 253 253 253 253 253 253 253 253 253 253 253
37582 -253 253 253 253 253 253 182 182 182 2 2 6
37583 - 2 2 6 2 2 6 2 2 6 46 46 46
37584 - 2 2 6 2 2 6 2 2 6 2 2 6
37585 - 10 10 10 86 86 86 38 38 38 10 10 10
37586 - 0 0 0 0 0 0 0 0 0 0 0 0
37587 - 0 0 0 0 0 0 0 0 0 0 0 0
37588 - 0 0 0 0 0 0 0 0 0 0 0 0
37589 - 0 0 0 0 0 0 0 0 0 0 0 0
37590 - 0 0 0 0 0 0 0 0 0 0 0 0
37591 - 0 0 0 0 0 0 0 0 0 0 0 0
37592 - 0 0 0 0 0 0 0 0 0 0 0 0
37593 - 10 10 10 26 26 26 66 66 66 82 82 82
37594 - 2 2 6 22 22 22 18 18 18 2 2 6
37595 -149 149 149 253 253 253 253 253 253 253 253 253
37596 -253 253 253 253 253 253 253 253 253 253 253 253
37597 -253 253 253 253 253 253 234 234 234 242 242 242
37598 -253 253 253 253 253 253 253 253 253 253 253 253
37599 -253 253 253 253 253 253 253 253 253 253 253 253
37600 -253 253 253 253 253 253 253 253 253 253 253 253
37601 -253 253 253 253 253 253 253 253 253 253 253 253
37602 -253 253 253 253 253 253 206 206 206 2 2 6
37603 - 2 2 6 2 2 6 2 2 6 38 38 38
37604 - 2 2 6 2 2 6 2 2 6 2 2 6
37605 - 6 6 6 86 86 86 46 46 46 14 14 14
37606 - 0 0 0 0 0 0 0 0 0 0 0 0
37607 - 0 0 0 0 0 0 0 0 0 0 0 0
37608 - 0 0 0 0 0 0 0 0 0 0 0 0
37609 - 0 0 0 0 0 0 0 0 0 0 0 0
37610 - 0 0 0 0 0 0 0 0 0 0 0 0
37611 - 0 0 0 0 0 0 0 0 0 0 0 0
37612 - 0 0 0 0 0 0 0 0 0 6 6 6
37613 - 18 18 18 46 46 46 86 86 86 18 18 18
37614 - 2 2 6 34 34 34 10 10 10 6 6 6
37615 -210 210 210 253 253 253 253 253 253 253 253 253
37616 -253 253 253 253 253 253 253 253 253 253 253 253
37617 -253 253 253 253 253 253 234 234 234 242 242 242
37618 -253 253 253 253 253 253 253 253 253 253 253 253
37619 -253 253 253 253 253 253 253 253 253 253 253 253
37620 -253 253 253 253 253 253 253 253 253 253 253 253
37621 -253 253 253 253 253 253 253 253 253 253 253 253
37622 -253 253 253 253 253 253 221 221 221 6 6 6
37623 - 2 2 6 2 2 6 6 6 6 30 30 30
37624 - 2 2 6 2 2 6 2 2 6 2 2 6
37625 - 2 2 6 82 82 82 54 54 54 18 18 18
37626 - 6 6 6 0 0 0 0 0 0 0 0 0
37627 - 0 0 0 0 0 0 0 0 0 0 0 0
37628 - 0 0 0 0 0 0 0 0 0 0 0 0
37629 - 0 0 0 0 0 0 0 0 0 0 0 0
37630 - 0 0 0 0 0 0 0 0 0 0 0 0
37631 - 0 0 0 0 0 0 0 0 0 0 0 0
37632 - 0 0 0 0 0 0 0 0 0 10 10 10
37633 - 26 26 26 66 66 66 62 62 62 2 2 6
37634 - 2 2 6 38 38 38 10 10 10 26 26 26
37635 -238 238 238 253 253 253 253 253 253 253 253 253
37636 -253 253 253 253 253 253 253 253 253 253 253 253
37637 -253 253 253 253 253 253 231 231 231 238 238 238
37638 -253 253 253 253 253 253 253 253 253 253 253 253
37639 -253 253 253 253 253 253 253 253 253 253 253 253
37640 -253 253 253 253 253 253 253 253 253 253 253 253
37641 -253 253 253 253 253 253 253 253 253 253 253 253
37642 -253 253 253 253 253 253 231 231 231 6 6 6
37643 - 2 2 6 2 2 6 10 10 10 30 30 30
37644 - 2 2 6 2 2 6 2 2 6 2 2 6
37645 - 2 2 6 66 66 66 58 58 58 22 22 22
37646 - 6 6 6 0 0 0 0 0 0 0 0 0
37647 - 0 0 0 0 0 0 0 0 0 0 0 0
37648 - 0 0 0 0 0 0 0 0 0 0 0 0
37649 - 0 0 0 0 0 0 0 0 0 0 0 0
37650 - 0 0 0 0 0 0 0 0 0 0 0 0
37651 - 0 0 0 0 0 0 0 0 0 0 0 0
37652 - 0 0 0 0 0 0 0 0 0 10 10 10
37653 - 38 38 38 78 78 78 6 6 6 2 2 6
37654 - 2 2 6 46 46 46 14 14 14 42 42 42
37655 -246 246 246 253 253 253 253 253 253 253 253 253
37656 -253 253 253 253 253 253 253 253 253 253 253 253
37657 -253 253 253 253 253 253 231 231 231 242 242 242
37658 -253 253 253 253 253 253 253 253 253 253 253 253
37659 -253 253 253 253 253 253 253 253 253 253 253 253
37660 -253 253 253 253 253 253 253 253 253 253 253 253
37661 -253 253 253 253 253 253 253 253 253 253 253 253
37662 -253 253 253 253 253 253 234 234 234 10 10 10
37663 - 2 2 6 2 2 6 22 22 22 14 14 14
37664 - 2 2 6 2 2 6 2 2 6 2 2 6
37665 - 2 2 6 66 66 66 62 62 62 22 22 22
37666 - 6 6 6 0 0 0 0 0 0 0 0 0
37667 - 0 0 0 0 0 0 0 0 0 0 0 0
37668 - 0 0 0 0 0 0 0 0 0 0 0 0
37669 - 0 0 0 0 0 0 0 0 0 0 0 0
37670 - 0 0 0 0 0 0 0 0 0 0 0 0
37671 - 0 0 0 0 0 0 0 0 0 0 0 0
37672 - 0 0 0 0 0 0 6 6 6 18 18 18
37673 - 50 50 50 74 74 74 2 2 6 2 2 6
37674 - 14 14 14 70 70 70 34 34 34 62 62 62
37675 -250 250 250 253 253 253 253 253 253 253 253 253
37676 -253 253 253 253 253 253 253 253 253 253 253 253
37677 -253 253 253 253 253 253 231 231 231 246 246 246
37678 -253 253 253 253 253 253 253 253 253 253 253 253
37679 -253 253 253 253 253 253 253 253 253 253 253 253
37680 -253 253 253 253 253 253 253 253 253 253 253 253
37681 -253 253 253 253 253 253 253 253 253 253 253 253
37682 -253 253 253 253 253 253 234 234 234 14 14 14
37683 - 2 2 6 2 2 6 30 30 30 2 2 6
37684 - 2 2 6 2 2 6 2 2 6 2 2 6
37685 - 2 2 6 66 66 66 62 62 62 22 22 22
37686 - 6 6 6 0 0 0 0 0 0 0 0 0
37687 - 0 0 0 0 0 0 0 0 0 0 0 0
37688 - 0 0 0 0 0 0 0 0 0 0 0 0
37689 - 0 0 0 0 0 0 0 0 0 0 0 0
37690 - 0 0 0 0 0 0 0 0 0 0 0 0
37691 - 0 0 0 0 0 0 0 0 0 0 0 0
37692 - 0 0 0 0 0 0 6 6 6 18 18 18
37693 - 54 54 54 62 62 62 2 2 6 2 2 6
37694 - 2 2 6 30 30 30 46 46 46 70 70 70
37695 -250 250 250 253 253 253 253 253 253 253 253 253
37696 -253 253 253 253 253 253 253 253 253 253 253 253
37697 -253 253 253 253 253 253 231 231 231 246 246 246
37698 -253 253 253 253 253 253 253 253 253 253 253 253
37699 -253 253 253 253 253 253 253 253 253 253 253 253
37700 -253 253 253 253 253 253 253 253 253 253 253 253
37701 -253 253 253 253 253 253 253 253 253 253 253 253
37702 -253 253 253 253 253 253 226 226 226 10 10 10
37703 - 2 2 6 6 6 6 30 30 30 2 2 6
37704 - 2 2 6 2 2 6 2 2 6 2 2 6
37705 - 2 2 6 66 66 66 58 58 58 22 22 22
37706 - 6 6 6 0 0 0 0 0 0 0 0 0
37707 - 0 0 0 0 0 0 0 0 0 0 0 0
37708 - 0 0 0 0 0 0 0 0 0 0 0 0
37709 - 0 0 0 0 0 0 0 0 0 0 0 0
37710 - 0 0 0 0 0 0 0 0 0 0 0 0
37711 - 0 0 0 0 0 0 0 0 0 0 0 0
37712 - 0 0 0 0 0 0 6 6 6 22 22 22
37713 - 58 58 58 62 62 62 2 2 6 2 2 6
37714 - 2 2 6 2 2 6 30 30 30 78 78 78
37715 -250 250 250 253 253 253 253 253 253 253 253 253
37716 -253 253 253 253 253 253 253 253 253 253 253 253
37717 -253 253 253 253 253 253 231 231 231 246 246 246
37718 -253 253 253 253 253 253 253 253 253 253 253 253
37719 -253 253 253 253 253 253 253 253 253 253 253 253
37720 -253 253 253 253 253 253 253 253 253 253 253 253
37721 -253 253 253 253 253 253 253 253 253 253 253 253
37722 -253 253 253 253 253 253 206 206 206 2 2 6
37723 - 22 22 22 34 34 34 18 14 6 22 22 22
37724 - 26 26 26 18 18 18 6 6 6 2 2 6
37725 - 2 2 6 82 82 82 54 54 54 18 18 18
37726 - 6 6 6 0 0 0 0 0 0 0 0 0
37727 - 0 0 0 0 0 0 0 0 0 0 0 0
37728 - 0 0 0 0 0 0 0 0 0 0 0 0
37729 - 0 0 0 0 0 0 0 0 0 0 0 0
37730 - 0 0 0 0 0 0 0 0 0 0 0 0
37731 - 0 0 0 0 0 0 0 0 0 0 0 0
37732 - 0 0 0 0 0 0 6 6 6 26 26 26
37733 - 62 62 62 106 106 106 74 54 14 185 133 11
37734 -210 162 10 121 92 8 6 6 6 62 62 62
37735 -238 238 238 253 253 253 253 253 253 253 253 253
37736 -253 253 253 253 253 253 253 253 253 253 253 253
37737 -253 253 253 253 253 253 231 231 231 246 246 246
37738 -253 253 253 253 253 253 253 253 253 253 253 253
37739 -253 253 253 253 253 253 253 253 253 253 253 253
37740 -253 253 253 253 253 253 253 253 253 253 253 253
37741 -253 253 253 253 253 253 253 253 253 253 253 253
37742 -253 253 253 253 253 253 158 158 158 18 18 18
37743 - 14 14 14 2 2 6 2 2 6 2 2 6
37744 - 6 6 6 18 18 18 66 66 66 38 38 38
37745 - 6 6 6 94 94 94 50 50 50 18 18 18
37746 - 6 6 6 0 0 0 0 0 0 0 0 0
37747 - 0 0 0 0 0 0 0 0 0 0 0 0
37748 - 0 0 0 0 0 0 0 0 0 0 0 0
37749 - 0 0 0 0 0 0 0 0 0 0 0 0
37750 - 0 0 0 0 0 0 0 0 0 0 0 0
37751 - 0 0 0 0 0 0 0 0 0 6 6 6
37752 - 10 10 10 10 10 10 18 18 18 38 38 38
37753 - 78 78 78 142 134 106 216 158 10 242 186 14
37754 -246 190 14 246 190 14 156 118 10 10 10 10
37755 - 90 90 90 238 238 238 253 253 253 253 253 253
37756 -253 253 253 253 253 253 253 253 253 253 253 253
37757 -253 253 253 253 253 253 231 231 231 250 250 250
37758 -253 253 253 253 253 253 253 253 253 253 253 253
37759 -253 253 253 253 253 253 253 253 253 253 253 253
37760 -253 253 253 253 253 253 253 253 253 253 253 253
37761 -253 253 253 253 253 253 253 253 253 246 230 190
37762 -238 204 91 238 204 91 181 142 44 37 26 9
37763 - 2 2 6 2 2 6 2 2 6 2 2 6
37764 - 2 2 6 2 2 6 38 38 38 46 46 46
37765 - 26 26 26 106 106 106 54 54 54 18 18 18
37766 - 6 6 6 0 0 0 0 0 0 0 0 0
37767 - 0 0 0 0 0 0 0 0 0 0 0 0
37768 - 0 0 0 0 0 0 0 0 0 0 0 0
37769 - 0 0 0 0 0 0 0 0 0 0 0 0
37770 - 0 0 0 0 0 0 0 0 0 0 0 0
37771 - 0 0 0 6 6 6 14 14 14 22 22 22
37772 - 30 30 30 38 38 38 50 50 50 70 70 70
37773 -106 106 106 190 142 34 226 170 11 242 186 14
37774 -246 190 14 246 190 14 246 190 14 154 114 10
37775 - 6 6 6 74 74 74 226 226 226 253 253 253
37776 -253 253 253 253 253 253 253 253 253 253 253 253
37777 -253 253 253 253 253 253 231 231 231 250 250 250
37778 -253 253 253 253 253 253 253 253 253 253 253 253
37779 -253 253 253 253 253 253 253 253 253 253 253 253
37780 -253 253 253 253 253 253 253 253 253 253 253 253
37781 -253 253 253 253 253 253 253 253 253 228 184 62
37782 -241 196 14 241 208 19 232 195 16 38 30 10
37783 - 2 2 6 2 2 6 2 2 6 2 2 6
37784 - 2 2 6 6 6 6 30 30 30 26 26 26
37785 -203 166 17 154 142 90 66 66 66 26 26 26
37786 - 6 6 6 0 0 0 0 0 0 0 0 0
37787 - 0 0 0 0 0 0 0 0 0 0 0 0
37788 - 0 0 0 0 0 0 0 0 0 0 0 0
37789 - 0 0 0 0 0 0 0 0 0 0 0 0
37790 - 0 0 0 0 0 0 0 0 0 0 0 0
37791 - 6 6 6 18 18 18 38 38 38 58 58 58
37792 - 78 78 78 86 86 86 101 101 101 123 123 123
37793 -175 146 61 210 150 10 234 174 13 246 186 14
37794 -246 190 14 246 190 14 246 190 14 238 190 10
37795 -102 78 10 2 2 6 46 46 46 198 198 198
37796 -253 253 253 253 253 253 253 253 253 253 253 253
37797 -253 253 253 253 253 253 234 234 234 242 242 242
37798 -253 253 253 253 253 253 253 253 253 253 253 253
37799 -253 253 253 253 253 253 253 253 253 253 253 253
37800 -253 253 253 253 253 253 253 253 253 253 253 253
37801 -253 253 253 253 253 253 253 253 253 224 178 62
37802 -242 186 14 241 196 14 210 166 10 22 18 6
37803 - 2 2 6 2 2 6 2 2 6 2 2 6
37804 - 2 2 6 2 2 6 6 6 6 121 92 8
37805 -238 202 15 232 195 16 82 82 82 34 34 34
37806 - 10 10 10 0 0 0 0 0 0 0 0 0
37807 - 0 0 0 0 0 0 0 0 0 0 0 0
37808 - 0 0 0 0 0 0 0 0 0 0 0 0
37809 - 0 0 0 0 0 0 0 0 0 0 0 0
37810 - 0 0 0 0 0 0 0 0 0 0 0 0
37811 - 14 14 14 38 38 38 70 70 70 154 122 46
37812 -190 142 34 200 144 11 197 138 11 197 138 11
37813 -213 154 11 226 170 11 242 186 14 246 190 14
37814 -246 190 14 246 190 14 246 190 14 246 190 14
37815 -225 175 15 46 32 6 2 2 6 22 22 22
37816 -158 158 158 250 250 250 253 253 253 253 253 253
37817 -253 253 253 253 253 253 253 253 253 253 253 253
37818 -253 253 253 253 253 253 253 253 253 253 253 253
37819 -253 253 253 253 253 253 253 253 253 253 253 253
37820 -253 253 253 253 253 253 253 253 253 253 253 253
37821 -253 253 253 250 250 250 242 242 242 224 178 62
37822 -239 182 13 236 186 11 213 154 11 46 32 6
37823 - 2 2 6 2 2 6 2 2 6 2 2 6
37824 - 2 2 6 2 2 6 61 42 6 225 175 15
37825 -238 190 10 236 186 11 112 100 78 42 42 42
37826 - 14 14 14 0 0 0 0 0 0 0 0 0
37827 - 0 0 0 0 0 0 0 0 0 0 0 0
37828 - 0 0 0 0 0 0 0 0 0 0 0 0
37829 - 0 0 0 0 0 0 0 0 0 0 0 0
37830 - 0 0 0 0 0 0 0 0 0 6 6 6
37831 - 22 22 22 54 54 54 154 122 46 213 154 11
37832 -226 170 11 230 174 11 226 170 11 226 170 11
37833 -236 178 12 242 186 14 246 190 14 246 190 14
37834 -246 190 14 246 190 14 246 190 14 246 190 14
37835 -241 196 14 184 144 12 10 10 10 2 2 6
37836 - 6 6 6 116 116 116 242 242 242 253 253 253
37837 -253 253 253 253 253 253 253 253 253 253 253 253
37838 -253 253 253 253 253 253 253 253 253 253 253 253
37839 -253 253 253 253 253 253 253 253 253 253 253 253
37840 -253 253 253 253 253 253 253 253 253 253 253 253
37841 -253 253 253 231 231 231 198 198 198 214 170 54
37842 -236 178 12 236 178 12 210 150 10 137 92 6
37843 - 18 14 6 2 2 6 2 2 6 2 2 6
37844 - 6 6 6 70 47 6 200 144 11 236 178 12
37845 -239 182 13 239 182 13 124 112 88 58 58 58
37846 - 22 22 22 6 6 6 0 0 0 0 0 0
37847 - 0 0 0 0 0 0 0 0 0 0 0 0
37848 - 0 0 0 0 0 0 0 0 0 0 0 0
37849 - 0 0 0 0 0 0 0 0 0 0 0 0
37850 - 0 0 0 0 0 0 0 0 0 10 10 10
37851 - 30 30 30 70 70 70 180 133 36 226 170 11
37852 -239 182 13 242 186 14 242 186 14 246 186 14
37853 -246 190 14 246 190 14 246 190 14 246 190 14
37854 -246 190 14 246 190 14 246 190 14 246 190 14
37855 -246 190 14 232 195 16 98 70 6 2 2 6
37856 - 2 2 6 2 2 6 66 66 66 221 221 221
37857 -253 253 253 253 253 253 253 253 253 253 253 253
37858 -253 253 253 253 253 253 253 253 253 253 253 253
37859 -253 253 253 253 253 253 253 253 253 253 253 253
37860 -253 253 253 253 253 253 253 253 253 253 253 253
37861 -253 253 253 206 206 206 198 198 198 214 166 58
37862 -230 174 11 230 174 11 216 158 10 192 133 9
37863 -163 110 8 116 81 8 102 78 10 116 81 8
37864 -167 114 7 197 138 11 226 170 11 239 182 13
37865 -242 186 14 242 186 14 162 146 94 78 78 78
37866 - 34 34 34 14 14 14 6 6 6 0 0 0
37867 - 0 0 0 0 0 0 0 0 0 0 0 0
37868 - 0 0 0 0 0 0 0 0 0 0 0 0
37869 - 0 0 0 0 0 0 0 0 0 0 0 0
37870 - 0 0 0 0 0 0 0 0 0 6 6 6
37871 - 30 30 30 78 78 78 190 142 34 226 170 11
37872 -239 182 13 246 190 14 246 190 14 246 190 14
37873 -246 190 14 246 190 14 246 190 14 246 190 14
37874 -246 190 14 246 190 14 246 190 14 246 190 14
37875 -246 190 14 241 196 14 203 166 17 22 18 6
37876 - 2 2 6 2 2 6 2 2 6 38 38 38
37877 -218 218 218 253 253 253 253 253 253 253 253 253
37878 -253 253 253 253 253 253 253 253 253 253 253 253
37879 -253 253 253 253 253 253 253 253 253 253 253 253
37880 -253 253 253 253 253 253 253 253 253 253 253 253
37881 -250 250 250 206 206 206 198 198 198 202 162 69
37882 -226 170 11 236 178 12 224 166 10 210 150 10
37883 -200 144 11 197 138 11 192 133 9 197 138 11
37884 -210 150 10 226 170 11 242 186 14 246 190 14
37885 -246 190 14 246 186 14 225 175 15 124 112 88
37886 - 62 62 62 30 30 30 14 14 14 6 6 6
37887 - 0 0 0 0 0 0 0 0 0 0 0 0
37888 - 0 0 0 0 0 0 0 0 0 0 0 0
37889 - 0 0 0 0 0 0 0 0 0 0 0 0
37890 - 0 0 0 0 0 0 0 0 0 10 10 10
37891 - 30 30 30 78 78 78 174 135 50 224 166 10
37892 -239 182 13 246 190 14 246 190 14 246 190 14
37893 -246 190 14 246 190 14 246 190 14 246 190 14
37894 -246 190 14 246 190 14 246 190 14 246 190 14
37895 -246 190 14 246 190 14 241 196 14 139 102 15
37896 - 2 2 6 2 2 6 2 2 6 2 2 6
37897 - 78 78 78 250 250 250 253 253 253 253 253 253
37898 -253 253 253 253 253 253 253 253 253 253 253 253
37899 -253 253 253 253 253 253 253 253 253 253 253 253
37900 -253 253 253 253 253 253 253 253 253 253 253 253
37901 -250 250 250 214 214 214 198 198 198 190 150 46
37902 -219 162 10 236 178 12 234 174 13 224 166 10
37903 -216 158 10 213 154 11 213 154 11 216 158 10
37904 -226 170 11 239 182 13 246 190 14 246 190 14
37905 -246 190 14 246 190 14 242 186 14 206 162 42
37906 -101 101 101 58 58 58 30 30 30 14 14 14
37907 - 6 6 6 0 0 0 0 0 0 0 0 0
37908 - 0 0 0 0 0 0 0 0 0 0 0 0
37909 - 0 0 0 0 0 0 0 0 0 0 0 0
37910 - 0 0 0 0 0 0 0 0 0 10 10 10
37911 - 30 30 30 74 74 74 174 135 50 216 158 10
37912 -236 178 12 246 190 14 246 190 14 246 190 14
37913 -246 190 14 246 190 14 246 190 14 246 190 14
37914 -246 190 14 246 190 14 246 190 14 246 190 14
37915 -246 190 14 246 190 14 241 196 14 226 184 13
37916 - 61 42 6 2 2 6 2 2 6 2 2 6
37917 - 22 22 22 238 238 238 253 253 253 253 253 253
37918 -253 253 253 253 253 253 253 253 253 253 253 253
37919 -253 253 253 253 253 253 253 253 253 253 253 253
37920 -253 253 253 253 253 253 253 253 253 253 253 253
37921 -253 253 253 226 226 226 187 187 187 180 133 36
37922 -216 158 10 236 178 12 239 182 13 236 178 12
37923 -230 174 11 226 170 11 226 170 11 230 174 11
37924 -236 178 12 242 186 14 246 190 14 246 190 14
37925 -246 190 14 246 190 14 246 186 14 239 182 13
37926 -206 162 42 106 106 106 66 66 66 34 34 34
37927 - 14 14 14 6 6 6 0 0 0 0 0 0
37928 - 0 0 0 0 0 0 0 0 0 0 0 0
37929 - 0 0 0 0 0 0 0 0 0 0 0 0
37930 - 0 0 0 0 0 0 0 0 0 6 6 6
37931 - 26 26 26 70 70 70 163 133 67 213 154 11
37932 -236 178 12 246 190 14 246 190 14 246 190 14
37933 -246 190 14 246 190 14 246 190 14 246 190 14
37934 -246 190 14 246 190 14 246 190 14 246 190 14
37935 -246 190 14 246 190 14 246 190 14 241 196 14
37936 -190 146 13 18 14 6 2 2 6 2 2 6
37937 - 46 46 46 246 246 246 253 253 253 253 253 253
37938 -253 253 253 253 253 253 253 253 253 253 253 253
37939 -253 253 253 253 253 253 253 253 253 253 253 253
37940 -253 253 253 253 253 253 253 253 253 253 253 253
37941 -253 253 253 221 221 221 86 86 86 156 107 11
37942 -216 158 10 236 178 12 242 186 14 246 186 14
37943 -242 186 14 239 182 13 239 182 13 242 186 14
37944 -242 186 14 246 186 14 246 190 14 246 190 14
37945 -246 190 14 246 190 14 246 190 14 246 190 14
37946 -242 186 14 225 175 15 142 122 72 66 66 66
37947 - 30 30 30 10 10 10 0 0 0 0 0 0
37948 - 0 0 0 0 0 0 0 0 0 0 0 0
37949 - 0 0 0 0 0 0 0 0 0 0 0 0
37950 - 0 0 0 0 0 0 0 0 0 6 6 6
37951 - 26 26 26 70 70 70 163 133 67 210 150 10
37952 -236 178 12 246 190 14 246 190 14 246 190 14
37953 -246 190 14 246 190 14 246 190 14 246 190 14
37954 -246 190 14 246 190 14 246 190 14 246 190 14
37955 -246 190 14 246 190 14 246 190 14 246 190 14
37956 -232 195 16 121 92 8 34 34 34 106 106 106
37957 -221 221 221 253 253 253 253 253 253 253 253 253
37958 -253 253 253 253 253 253 253 253 253 253 253 253
37959 -253 253 253 253 253 253 253 253 253 253 253 253
37960 -253 253 253 253 253 253 253 253 253 253 253 253
37961 -242 242 242 82 82 82 18 14 6 163 110 8
37962 -216 158 10 236 178 12 242 186 14 246 190 14
37963 -246 190 14 246 190 14 246 190 14 246 190 14
37964 -246 190 14 246 190 14 246 190 14 246 190 14
37965 -246 190 14 246 190 14 246 190 14 246 190 14
37966 -246 190 14 246 190 14 242 186 14 163 133 67
37967 - 46 46 46 18 18 18 6 6 6 0 0 0
37968 - 0 0 0 0 0 0 0 0 0 0 0 0
37969 - 0 0 0 0 0 0 0 0 0 0 0 0
37970 - 0 0 0 0 0 0 0 0 0 10 10 10
37971 - 30 30 30 78 78 78 163 133 67 210 150 10
37972 -236 178 12 246 186 14 246 190 14 246 190 14
37973 -246 190 14 246 190 14 246 190 14 246 190 14
37974 -246 190 14 246 190 14 246 190 14 246 190 14
37975 -246 190 14 246 190 14 246 190 14 246 190 14
37976 -241 196 14 215 174 15 190 178 144 253 253 253
37977 -253 253 253 253 253 253 253 253 253 253 253 253
37978 -253 253 253 253 253 253 253 253 253 253 253 253
37979 -253 253 253 253 253 253 253 253 253 253 253 253
37980 -253 253 253 253 253 253 253 253 253 218 218 218
37981 - 58 58 58 2 2 6 22 18 6 167 114 7
37982 -216 158 10 236 178 12 246 186 14 246 190 14
37983 -246 190 14 246 190 14 246 190 14 246 190 14
37984 -246 190 14 246 190 14 246 190 14 246 190 14
37985 -246 190 14 246 190 14 246 190 14 246 190 14
37986 -246 190 14 246 186 14 242 186 14 190 150 46
37987 - 54 54 54 22 22 22 6 6 6 0 0 0
37988 - 0 0 0 0 0 0 0 0 0 0 0 0
37989 - 0 0 0 0 0 0 0 0 0 0 0 0
37990 - 0 0 0 0 0 0 0 0 0 14 14 14
37991 - 38 38 38 86 86 86 180 133 36 213 154 11
37992 -236 178 12 246 186 14 246 190 14 246 190 14
37993 -246 190 14 246 190 14 246 190 14 246 190 14
37994 -246 190 14 246 190 14 246 190 14 246 190 14
37995 -246 190 14 246 190 14 246 190 14 246 190 14
37996 -246 190 14 232 195 16 190 146 13 214 214 214
37997 -253 253 253 253 253 253 253 253 253 253 253 253
37998 -253 253 253 253 253 253 253 253 253 253 253 253
37999 -253 253 253 253 253 253 253 253 253 253 253 253
38000 -253 253 253 250 250 250 170 170 170 26 26 26
38001 - 2 2 6 2 2 6 37 26 9 163 110 8
38002 -219 162 10 239 182 13 246 186 14 246 190 14
38003 -246 190 14 246 190 14 246 190 14 246 190 14
38004 -246 190 14 246 190 14 246 190 14 246 190 14
38005 -246 190 14 246 190 14 246 190 14 246 190 14
38006 -246 186 14 236 178 12 224 166 10 142 122 72
38007 - 46 46 46 18 18 18 6 6 6 0 0 0
38008 - 0 0 0 0 0 0 0 0 0 0 0 0
38009 - 0 0 0 0 0 0 0 0 0 0 0 0
38010 - 0 0 0 0 0 0 6 6 6 18 18 18
38011 - 50 50 50 109 106 95 192 133 9 224 166 10
38012 -242 186 14 246 190 14 246 190 14 246 190 14
38013 -246 190 14 246 190 14 246 190 14 246 190 14
38014 -246 190 14 246 190 14 246 190 14 246 190 14
38015 -246 190 14 246 190 14 246 190 14 246 190 14
38016 -242 186 14 226 184 13 210 162 10 142 110 46
38017 -226 226 226 253 253 253 253 253 253 253 253 253
38018 -253 253 253 253 253 253 253 253 253 253 253 253
38019 -253 253 253 253 253 253 253 253 253 253 253 253
38020 -198 198 198 66 66 66 2 2 6 2 2 6
38021 - 2 2 6 2 2 6 50 34 6 156 107 11
38022 -219 162 10 239 182 13 246 186 14 246 190 14
38023 -246 190 14 246 190 14 246 190 14 246 190 14
38024 -246 190 14 246 190 14 246 190 14 246 190 14
38025 -246 190 14 246 190 14 246 190 14 242 186 14
38026 -234 174 13 213 154 11 154 122 46 66 66 66
38027 - 30 30 30 10 10 10 0 0 0 0 0 0
38028 - 0 0 0 0 0 0 0 0 0 0 0 0
38029 - 0 0 0 0 0 0 0 0 0 0 0 0
38030 - 0 0 0 0 0 0 6 6 6 22 22 22
38031 - 58 58 58 154 121 60 206 145 10 234 174 13
38032 -242 186 14 246 186 14 246 190 14 246 190 14
38033 -246 190 14 246 190 14 246 190 14 246 190 14
38034 -246 190 14 246 190 14 246 190 14 246 190 14
38035 -246 190 14 246 190 14 246 190 14 246 190 14
38036 -246 186 14 236 178 12 210 162 10 163 110 8
38037 - 61 42 6 138 138 138 218 218 218 250 250 250
38038 -253 253 253 253 253 253 253 253 253 250 250 250
38039 -242 242 242 210 210 210 144 144 144 66 66 66
38040 - 6 6 6 2 2 6 2 2 6 2 2 6
38041 - 2 2 6 2 2 6 61 42 6 163 110 8
38042 -216 158 10 236 178 12 246 190 14 246 190 14
38043 -246 190 14 246 190 14 246 190 14 246 190 14
38044 -246 190 14 246 190 14 246 190 14 246 190 14
38045 -246 190 14 239 182 13 230 174 11 216 158 10
38046 -190 142 34 124 112 88 70 70 70 38 38 38
38047 - 18 18 18 6 6 6 0 0 0 0 0 0
38048 - 0 0 0 0 0 0 0 0 0 0 0 0
38049 - 0 0 0 0 0 0 0 0 0 0 0 0
38050 - 0 0 0 0 0 0 6 6 6 22 22 22
38051 - 62 62 62 168 124 44 206 145 10 224 166 10
38052 -236 178 12 239 182 13 242 186 14 242 186 14
38053 -246 186 14 246 190 14 246 190 14 246 190 14
38054 -246 190 14 246 190 14 246 190 14 246 190 14
38055 -246 190 14 246 190 14 246 190 14 246 190 14
38056 -246 190 14 236 178 12 216 158 10 175 118 6
38057 - 80 54 7 2 2 6 6 6 6 30 30 30
38058 - 54 54 54 62 62 62 50 50 50 38 38 38
38059 - 14 14 14 2 2 6 2 2 6 2 2 6
38060 - 2 2 6 2 2 6 2 2 6 2 2 6
38061 - 2 2 6 6 6 6 80 54 7 167 114 7
38062 -213 154 11 236 178 12 246 190 14 246 190 14
38063 -246 190 14 246 190 14 246 190 14 246 190 14
38064 -246 190 14 242 186 14 239 182 13 239 182 13
38065 -230 174 11 210 150 10 174 135 50 124 112 88
38066 - 82 82 82 54 54 54 34 34 34 18 18 18
38067 - 6 6 6 0 0 0 0 0 0 0 0 0
38068 - 0 0 0 0 0 0 0 0 0 0 0 0
38069 - 0 0 0 0 0 0 0 0 0 0 0 0
38070 - 0 0 0 0 0 0 6 6 6 18 18 18
38071 - 50 50 50 158 118 36 192 133 9 200 144 11
38072 -216 158 10 219 162 10 224 166 10 226 170 11
38073 -230 174 11 236 178 12 239 182 13 239 182 13
38074 -242 186 14 246 186 14 246 190 14 246 190 14
38075 -246 190 14 246 190 14 246 190 14 246 190 14
38076 -246 186 14 230 174 11 210 150 10 163 110 8
38077 -104 69 6 10 10 10 2 2 6 2 2 6
38078 - 2 2 6 2 2 6 2 2 6 2 2 6
38079 - 2 2 6 2 2 6 2 2 6 2 2 6
38080 - 2 2 6 2 2 6 2 2 6 2 2 6
38081 - 2 2 6 6 6 6 91 60 6 167 114 7
38082 -206 145 10 230 174 11 242 186 14 246 190 14
38083 -246 190 14 246 190 14 246 186 14 242 186 14
38084 -239 182 13 230 174 11 224 166 10 213 154 11
38085 -180 133 36 124 112 88 86 86 86 58 58 58
38086 - 38 38 38 22 22 22 10 10 10 6 6 6
38087 - 0 0 0 0 0 0 0 0 0 0 0 0
38088 - 0 0 0 0 0 0 0 0 0 0 0 0
38089 - 0 0 0 0 0 0 0 0 0 0 0 0
38090 - 0 0 0 0 0 0 0 0 0 14 14 14
38091 - 34 34 34 70 70 70 138 110 50 158 118 36
38092 -167 114 7 180 123 7 192 133 9 197 138 11
38093 -200 144 11 206 145 10 213 154 11 219 162 10
38094 -224 166 10 230 174 11 239 182 13 242 186 14
38095 -246 186 14 246 186 14 246 186 14 246 186 14
38096 -239 182 13 216 158 10 185 133 11 152 99 6
38097 -104 69 6 18 14 6 2 2 6 2 2 6
38098 - 2 2 6 2 2 6 2 2 6 2 2 6
38099 - 2 2 6 2 2 6 2 2 6 2 2 6
38100 - 2 2 6 2 2 6 2 2 6 2 2 6
38101 - 2 2 6 6 6 6 80 54 7 152 99 6
38102 -192 133 9 219 162 10 236 178 12 239 182 13
38103 -246 186 14 242 186 14 239 182 13 236 178 12
38104 -224 166 10 206 145 10 192 133 9 154 121 60
38105 - 94 94 94 62 62 62 42 42 42 22 22 22
38106 - 14 14 14 6 6 6 0 0 0 0 0 0
38107 - 0 0 0 0 0 0 0 0 0 0 0 0
38108 - 0 0 0 0 0 0 0 0 0 0 0 0
38109 - 0 0 0 0 0 0 0 0 0 0 0 0
38110 - 0 0 0 0 0 0 0 0 0 6 6 6
38111 - 18 18 18 34 34 34 58 58 58 78 78 78
38112 -101 98 89 124 112 88 142 110 46 156 107 11
38113 -163 110 8 167 114 7 175 118 6 180 123 7
38114 -185 133 11 197 138 11 210 150 10 219 162 10
38115 -226 170 11 236 178 12 236 178 12 234 174 13
38116 -219 162 10 197 138 11 163 110 8 130 83 6
38117 - 91 60 6 10 10 10 2 2 6 2 2 6
38118 - 18 18 18 38 38 38 38 38 38 38 38 38
38119 - 38 38 38 38 38 38 38 38 38 38 38 38
38120 - 38 38 38 38 38 38 26 26 26 2 2 6
38121 - 2 2 6 6 6 6 70 47 6 137 92 6
38122 -175 118 6 200 144 11 219 162 10 230 174 11
38123 -234 174 13 230 174 11 219 162 10 210 150 10
38124 -192 133 9 163 110 8 124 112 88 82 82 82
38125 - 50 50 50 30 30 30 14 14 14 6 6 6
38126 - 0 0 0 0 0 0 0 0 0 0 0 0
38127 - 0 0 0 0 0 0 0 0 0 0 0 0
38128 - 0 0 0 0 0 0 0 0 0 0 0 0
38129 - 0 0 0 0 0 0 0 0 0 0 0 0
38130 - 0 0 0 0 0 0 0 0 0 0 0 0
38131 - 6 6 6 14 14 14 22 22 22 34 34 34
38132 - 42 42 42 58 58 58 74 74 74 86 86 86
38133 -101 98 89 122 102 70 130 98 46 121 87 25
38134 -137 92 6 152 99 6 163 110 8 180 123 7
38135 -185 133 11 197 138 11 206 145 10 200 144 11
38136 -180 123 7 156 107 11 130 83 6 104 69 6
38137 - 50 34 6 54 54 54 110 110 110 101 98 89
38138 - 86 86 86 82 82 82 78 78 78 78 78 78
38139 - 78 78 78 78 78 78 78 78 78 78 78 78
38140 - 78 78 78 82 82 82 86 86 86 94 94 94
38141 -106 106 106 101 101 101 86 66 34 124 80 6
38142 -156 107 11 180 123 7 192 133 9 200 144 11
38143 -206 145 10 200 144 11 192 133 9 175 118 6
38144 -139 102 15 109 106 95 70 70 70 42 42 42
38145 - 22 22 22 10 10 10 0 0 0 0 0 0
38146 - 0 0 0 0 0 0 0 0 0 0 0 0
38147 - 0 0 0 0 0 0 0 0 0 0 0 0
38148 - 0 0 0 0 0 0 0 0 0 0 0 0
38149 - 0 0 0 0 0 0 0 0 0 0 0 0
38150 - 0 0 0 0 0 0 0 0 0 0 0 0
38151 - 0 0 0 0 0 0 6 6 6 10 10 10
38152 - 14 14 14 22 22 22 30 30 30 38 38 38
38153 - 50 50 50 62 62 62 74 74 74 90 90 90
38154 -101 98 89 112 100 78 121 87 25 124 80 6
38155 -137 92 6 152 99 6 152 99 6 152 99 6
38156 -138 86 6 124 80 6 98 70 6 86 66 30
38157 -101 98 89 82 82 82 58 58 58 46 46 46
38158 - 38 38 38 34 34 34 34 34 34 34 34 34
38159 - 34 34 34 34 34 34 34 34 34 34 34 34
38160 - 34 34 34 34 34 34 38 38 38 42 42 42
38161 - 54 54 54 82 82 82 94 86 76 91 60 6
38162 -134 86 6 156 107 11 167 114 7 175 118 6
38163 -175 118 6 167 114 7 152 99 6 121 87 25
38164 -101 98 89 62 62 62 34 34 34 18 18 18
38165 - 6 6 6 0 0 0 0 0 0 0 0 0
38166 - 0 0 0 0 0 0 0 0 0 0 0 0
38167 - 0 0 0 0 0 0 0 0 0 0 0 0
38168 - 0 0 0 0 0 0 0 0 0 0 0 0
38169 - 0 0 0 0 0 0 0 0 0 0 0 0
38170 - 0 0 0 0 0 0 0 0 0 0 0 0
38171 - 0 0 0 0 0 0 0 0 0 0 0 0
38172 - 0 0 0 6 6 6 6 6 6 10 10 10
38173 - 18 18 18 22 22 22 30 30 30 42 42 42
38174 - 50 50 50 66 66 66 86 86 86 101 98 89
38175 -106 86 58 98 70 6 104 69 6 104 69 6
38176 -104 69 6 91 60 6 82 62 34 90 90 90
38177 - 62 62 62 38 38 38 22 22 22 14 14 14
38178 - 10 10 10 10 10 10 10 10 10 10 10 10
38179 - 10 10 10 10 10 10 6 6 6 10 10 10
38180 - 10 10 10 10 10 10 10 10 10 14 14 14
38181 - 22 22 22 42 42 42 70 70 70 89 81 66
38182 - 80 54 7 104 69 6 124 80 6 137 92 6
38183 -134 86 6 116 81 8 100 82 52 86 86 86
38184 - 58 58 58 30 30 30 14 14 14 6 6 6
38185 - 0 0 0 0 0 0 0 0 0 0 0 0
38186 - 0 0 0 0 0 0 0 0 0 0 0 0
38187 - 0 0 0 0 0 0 0 0 0 0 0 0
38188 - 0 0 0 0 0 0 0 0 0 0 0 0
38189 - 0 0 0 0 0 0 0 0 0 0 0 0
38190 - 0 0 0 0 0 0 0 0 0 0 0 0
38191 - 0 0 0 0 0 0 0 0 0 0 0 0
38192 - 0 0 0 0 0 0 0 0 0 0 0 0
38193 - 0 0 0 6 6 6 10 10 10 14 14 14
38194 - 18 18 18 26 26 26 38 38 38 54 54 54
38195 - 70 70 70 86 86 86 94 86 76 89 81 66
38196 - 89 81 66 86 86 86 74 74 74 50 50 50
38197 - 30 30 30 14 14 14 6 6 6 0 0 0
38198 - 0 0 0 0 0 0 0 0 0 0 0 0
38199 - 0 0 0 0 0 0 0 0 0 0 0 0
38200 - 0 0 0 0 0 0 0 0 0 0 0 0
38201 - 6 6 6 18 18 18 34 34 34 58 58 58
38202 - 82 82 82 89 81 66 89 81 66 89 81 66
38203 - 94 86 66 94 86 76 74 74 74 50 50 50
38204 - 26 26 26 14 14 14 6 6 6 0 0 0
38205 - 0 0 0 0 0 0 0 0 0 0 0 0
38206 - 0 0 0 0 0 0 0 0 0 0 0 0
38207 - 0 0 0 0 0 0 0 0 0 0 0 0
38208 - 0 0 0 0 0 0 0 0 0 0 0 0
38209 - 0 0 0 0 0 0 0 0 0 0 0 0
38210 - 0 0 0 0 0 0 0 0 0 0 0 0
38211 - 0 0 0 0 0 0 0 0 0 0 0 0
38212 - 0 0 0 0 0 0 0 0 0 0 0 0
38213 - 0 0 0 0 0 0 0 0 0 0 0 0
38214 - 6 6 6 6 6 6 14 14 14 18 18 18
38215 - 30 30 30 38 38 38 46 46 46 54 54 54
38216 - 50 50 50 42 42 42 30 30 30 18 18 18
38217 - 10 10 10 0 0 0 0 0 0 0 0 0
38218 - 0 0 0 0 0 0 0 0 0 0 0 0
38219 - 0 0 0 0 0 0 0 0 0 0 0 0
38220 - 0 0 0 0 0 0 0 0 0 0 0 0
38221 - 0 0 0 6 6 6 14 14 14 26 26 26
38222 - 38 38 38 50 50 50 58 58 58 58 58 58
38223 - 54 54 54 42 42 42 30 30 30 18 18 18
38224 - 10 10 10 0 0 0 0 0 0 0 0 0
38225 - 0 0 0 0 0 0 0 0 0 0 0 0
38226 - 0 0 0 0 0 0 0 0 0 0 0 0
38227 - 0 0 0 0 0 0 0 0 0 0 0 0
38228 - 0 0 0 0 0 0 0 0 0 0 0 0
38229 - 0 0 0 0 0 0 0 0 0 0 0 0
38230 - 0 0 0 0 0 0 0 0 0 0 0 0
38231 - 0 0 0 0 0 0 0 0 0 0 0 0
38232 - 0 0 0 0 0 0 0 0 0 0 0 0
38233 - 0 0 0 0 0 0 0 0 0 0 0 0
38234 - 0 0 0 0 0 0 0 0 0 6 6 6
38235 - 6 6 6 10 10 10 14 14 14 18 18 18
38236 - 18 18 18 14 14 14 10 10 10 6 6 6
38237 - 0 0 0 0 0 0 0 0 0 0 0 0
38238 - 0 0 0 0 0 0 0 0 0 0 0 0
38239 - 0 0 0 0 0 0 0 0 0 0 0 0
38240 - 0 0 0 0 0 0 0 0 0 0 0 0
38241 - 0 0 0 0 0 0 0 0 0 6 6 6
38242 - 14 14 14 18 18 18 22 22 22 22 22 22
38243 - 18 18 18 14 14 14 10 10 10 6 6 6
38244 - 0 0 0 0 0 0 0 0 0 0 0 0
38245 - 0 0 0 0 0 0 0 0 0 0 0 0
38246 - 0 0 0 0 0 0 0 0 0 0 0 0
38247 - 0 0 0 0 0 0 0 0 0 0 0 0
38248 - 0 0 0 0 0 0 0 0 0 0 0 0
38249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38250 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38251 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38254 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38258 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38259 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38260 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38261 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38262 +4 4 4 4 4 4
38263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38265 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38272 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38274 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38276 +4 4 4 4 4 4
38277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38279 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38286 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38288 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38290 +4 4 4 4 4 4
38291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38300 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38302 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38304 +4 4 4 4 4 4
38305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38315 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38316 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38318 +4 4 4 4 4 4
38319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38328 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38329 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38330 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38332 +4 4 4 4 4 4
38333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38335 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38337 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38338 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38341 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38342 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38343 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38344 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38346 +4 4 4 4 4 4
38347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38349 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38351 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38352 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38353 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38354 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38355 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38356 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38357 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38358 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38359 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38360 +4 4 4 4 4 4
38361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38362 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38363 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38365 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38366 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38367 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38368 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38369 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38370 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38371 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38372 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38373 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38374 +4 4 4 4 4 4
38375 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38376 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38377 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38378 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38379 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38380 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38381 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38382 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38383 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38384 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38385 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38386 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38387 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38388 +4 4 4 4 4 4
38389 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38390 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38391 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38392 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38393 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38394 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38395 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38396 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38397 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38398 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38399 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38400 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38401 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38402 +4 4 4 4 4 4
38403 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38404 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38405 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38406 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38407 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38408 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38409 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38410 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38411 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38412 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38413 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38414 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38415 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38416 +4 4 4 4 4 4
38417 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38418 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38419 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38420 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38421 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38422 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38423 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38424 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38425 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38426 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38427 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38428 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38429 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38430 +4 4 4 4 4 4
38431 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38432 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38433 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38434 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38435 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38436 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38437 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38438 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38439 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38440 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38441 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38442 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38443 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38444 +4 4 4 4 4 4
38445 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38446 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38447 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38448 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38449 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38450 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38451 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38452 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38453 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38454 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38455 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38456 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38457 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38458 +4 4 4 4 4 4
38459 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38460 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38461 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38462 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38463 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38464 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38465 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38466 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38467 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38468 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38469 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38470 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38471 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38472 +4 4 4 4 4 4
38473 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38474 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38475 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38476 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38477 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38478 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38479 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38480 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38481 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38482 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38483 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38484 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38485 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38486 +4 4 4 4 4 4
38487 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38488 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38489 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38490 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38491 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38492 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38493 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38494 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38495 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38496 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38497 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38498 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38499 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38500 +0 0 0 4 4 4
38501 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38502 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38503 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38504 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38505 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38506 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38507 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38508 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38509 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38510 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38511 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38512 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38513 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38514 +2 0 0 0 0 0
38515 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38516 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38517 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38518 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38519 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38520 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38521 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38522 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38523 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38524 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38525 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38526 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38527 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38528 +37 38 37 0 0 0
38529 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38530 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38531 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38532 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38533 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38534 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38535 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38536 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38537 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38538 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38539 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38540 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38541 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38542 +85 115 134 4 0 0
38543 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38544 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38545 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38546 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38547 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38548 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38549 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38550 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38551 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38552 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38553 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38554 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38555 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38556 +60 73 81 4 0 0
38557 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38558 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38559 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38560 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38561 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38562 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38563 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38564 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38565 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38566 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38567 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38568 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38569 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38570 +16 19 21 4 0 0
38571 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38572 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38573 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38574 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38575 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38576 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38577 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38578 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38579 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38580 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38581 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38582 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38583 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38584 +4 0 0 4 3 3
38585 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38586 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38587 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38588 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38589 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38590 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38591 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38592 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38593 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38594 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38595 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38596 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38597 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38598 +3 2 2 4 4 4
38599 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38600 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38601 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38602 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38603 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38604 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38605 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38606 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38607 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38608 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38609 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38610 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38611 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38612 +4 4 4 4 4 4
38613 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38614 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38615 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38616 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38617 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38618 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38619 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38620 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38621 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38622 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38623 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38624 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38625 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38626 +4 4 4 4 4 4
38627 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38628 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38629 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38630 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38631 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38632 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38633 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38634 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38635 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38636 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38637 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38638 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38639 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38640 +5 5 5 5 5 5
38641 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38642 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38643 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38644 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38645 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38646 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38647 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38648 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38649 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38650 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38651 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38652 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38653 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38654 +5 5 5 4 4 4
38655 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38656 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38657 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38658 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38659 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38660 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38661 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38662 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38663 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38664 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38665 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38666 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38667 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38668 +4 4 4 4 4 4
38669 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38670 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38671 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38672 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38673 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38674 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38675 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38676 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38677 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38678 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38679 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38680 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38681 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38682 +4 4 4 4 4 4
38683 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38684 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38685 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38686 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38687 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38688 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38689 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38690 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38691 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38692 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38693 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38694 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38695 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38696 +4 4 4 4 4 4
38697 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38698 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38699 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38700 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38701 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38702 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38703 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38704 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38705 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38706 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38707 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38708 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38709 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38710 +4 4 4 4 4 4
38711 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38712 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38713 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38714 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38715 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38716 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38717 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38718 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38719 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38720 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38721 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38722 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38723 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38724 +4 4 4 4 4 4
38725 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38726 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38727 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38728 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38729 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38730 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38731 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38732 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38733 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38734 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38735 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38736 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38737 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38738 +4 4 4 4 4 4
38739 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38740 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38741 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38742 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38743 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38744 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38745 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38746 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38747 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38748 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38749 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38750 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38751 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38752 +4 4 4 4 4 4
38753 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38754 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38755 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38756 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38757 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38758 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38759 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38760 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38761 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38762 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38763 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38764 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38765 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38766 +4 4 4 4 4 4
38767 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38768 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38769 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38770 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38771 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38772 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38773 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38774 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38775 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38776 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38777 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38778 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38779 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38780 +4 4 4 4 4 4
38781 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38782 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38783 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38784 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38785 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38786 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38787 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38788 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38789 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38790 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38791 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38792 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38793 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38794 +4 4 4 4 4 4
38795 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38796 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38797 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38798 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38799 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38800 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38801 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38802 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38803 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38804 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38805 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38806 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38807 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38808 +4 4 4 4 4 4
38809 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38810 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38811 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38812 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38813 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38814 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38815 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38816 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38817 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38818 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38819 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38820 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38821 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38822 +4 4 4 4 4 4
38823 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38824 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38825 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38826 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38827 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38828 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38829 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38830 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38831 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38832 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38833 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38834 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38835 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38836 +4 4 4 4 4 4
38837 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38838 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38839 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38840 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38841 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38842 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38843 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38844 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38845 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38846 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38847 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38848 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38849 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38850 +4 4 4 4 4 4
38851 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38852 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38853 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38854 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38855 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38856 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38857 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38858 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38859 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38860 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38861 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38862 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38863 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38864 +4 4 4 4 4 4
38865 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38866 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38867 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38868 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38869 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38870 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38871 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38872 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38873 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38874 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38875 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38876 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38877 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38878 +4 4 4 4 4 4
38879 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38880 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38881 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38882 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38883 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38884 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38885 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38886 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38887 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38888 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38889 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38892 +4 4 4 4 4 4
38893 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38894 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38895 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38896 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38897 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38898 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38899 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38900 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38901 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38902 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38903 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38906 +4 4 4 4 4 4
38907 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38908 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38909 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38910 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38911 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38912 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38913 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38914 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38915 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38916 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38917 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38920 +4 4 4 4 4 4
38921 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38922 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38923 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38924 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38925 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38926 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38927 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38928 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38929 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38930 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38931 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38932 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38934 +4 4 4 4 4 4
38935 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38936 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38937 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38938 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38939 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38940 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38941 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38942 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38943 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38944 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38945 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38948 +4 4 4 4 4 4
38949 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38950 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38951 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38952 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38953 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38954 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38955 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38956 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38957 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38958 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38959 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38962 +4 4 4 4 4 4
38963 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38964 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38965 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38966 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38967 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38968 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38969 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38970 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38971 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38972 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38973 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38974 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38976 +4 4 4 4 4 4
38977 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38978 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38979 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38980 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38981 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38982 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38983 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38984 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38985 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38986 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38987 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38988 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38989 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38990 +4 4 4 4 4 4
38991 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38992 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38993 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38994 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38995 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38996 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38997 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38998 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38999 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
39000 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39001 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39002 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39003 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39004 +4 4 4 4 4 4
39005 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
39006 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
39007 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
39008 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
39009 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
39010 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
39011 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
39012 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
39013 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
39014 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
39015 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39016 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39018 +4 4 4 4 4 4
39019 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
39020 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
39021 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
39022 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
39023 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
39024 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
39025 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
39026 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
39027 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
39028 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
39029 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39030 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39031 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39032 +4 4 4 4 4 4
39033 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
39034 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
39035 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
39036 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
39037 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
39038 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
39039 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39040 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
39041 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
39042 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
39043 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39044 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39046 +4 4 4 4 4 4
39047 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
39048 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
39049 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
39050 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
39051 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
39052 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
39053 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
39054 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
39055 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
39056 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
39057 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39060 +4 4 4 4 4 4
39061 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
39062 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
39063 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
39064 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
39065 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
39066 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
39067 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
39068 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
39069 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
39070 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
39071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39074 +4 4 4 4 4 4
39075 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39076 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
39077 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
39078 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
39079 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
39080 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
39081 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
39082 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
39083 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
39084 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
39085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39088 +4 4 4 4 4 4
39089 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
39090 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
39091 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
39092 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
39093 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
39094 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
39095 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
39096 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
39097 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
39098 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
39099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39100 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39101 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39102 +4 4 4 4 4 4
39103 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
39104 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
39105 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
39106 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
39107 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
39108 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
39109 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
39110 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
39111 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
39112 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39114 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39115 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39116 +4 4 4 4 4 4
39117 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
39118 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39119 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
39120 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
39121 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
39122 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
39123 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
39124 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
39125 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
39126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39127 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39128 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39129 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39130 +4 4 4 4 4 4
39131 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
39132 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
39133 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
39134 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
39135 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
39136 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
39137 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
39138 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
39139 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
39140 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39141 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39142 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39143 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39144 +4 4 4 4 4 4
39145 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39146 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
39147 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
39148 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
39149 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
39150 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
39151 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
39152 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
39153 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39154 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39155 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39156 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39157 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39158 +4 4 4 4 4 4
39159 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
39160 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
39161 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39162 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
39163 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
39164 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
39165 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
39166 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
39167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39169 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39170 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39171 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39172 +4 4 4 4 4 4
39173 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39174 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
39175 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
39176 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
39177 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
39178 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
39179 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
39180 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
39181 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39182 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39183 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39184 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39185 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39186 +4 4 4 4 4 4
39187 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39188 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
39189 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39190 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
39191 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
39192 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
39193 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
39194 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
39195 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39196 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39197 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39198 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39199 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39200 +4 4 4 4 4 4
39201 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39202 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
39203 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
39204 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
39205 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
39206 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
39207 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
39208 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
39209 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39210 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39211 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39212 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39213 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39214 +4 4 4 4 4 4
39215 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39216 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39217 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
39218 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39219 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
39220 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
39221 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
39222 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39223 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39224 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39225 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39226 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39227 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39228 +4 4 4 4 4 4
39229 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39230 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39231 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39232 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
39233 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
39234 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
39235 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
39236 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39237 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39238 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39239 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39240 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39241 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39242 +4 4 4 4 4 4
39243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39244 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39245 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39246 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39247 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
39248 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
39249 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
39250 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39251 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39254 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39256 +4 4 4 4 4 4
39257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39258 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39259 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39260 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39261 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39262 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
39263 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
39264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39265 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39270 +4 4 4 4 4 4
39271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39272 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39274 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39275 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39276 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39277 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
39278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39279 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39284 +4 4 4 4 4 4
39285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39286 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39288 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
39289 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
39290 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
39291 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
39292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39298 +4 4 4 4 4 4
39299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39300 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39302 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39303 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
39304 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39305 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39312 +4 4 4 4 4 4
39313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39315 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39316 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39317 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39318 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39319 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39326 +4 4 4 4 4 4
39327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39328 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39329 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39330 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39331 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39332 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39333 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39335 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39340 +4 4 4 4 4 4
39341 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39342 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39343 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39344 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39345 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39346 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39349 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39354 +4 4 4 4 4 4
39355 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39356 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39357 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39358 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39359 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39360 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39362 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39363 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39368 +4 4 4 4 4 4
39369 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
39370 index 3473e75..c930142 100644
39371 --- a/drivers/video/udlfb.c
39372 +++ b/drivers/video/udlfb.c
39373 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
39374 dlfb_urb_completion(urb);
39375
39376 error:
39377 - atomic_add(bytes_sent, &dev->bytes_sent);
39378 - atomic_add(bytes_identical, &dev->bytes_identical);
39379 - atomic_add(width*height*2, &dev->bytes_rendered);
39380 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39381 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39382 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39383 end_cycles = get_cycles();
39384 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39385 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39386 >> 10)), /* Kcycles */
39387 &dev->cpu_kcycles_used);
39388
39389 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
39390 dlfb_urb_completion(urb);
39391
39392 error:
39393 - atomic_add(bytes_sent, &dev->bytes_sent);
39394 - atomic_add(bytes_identical, &dev->bytes_identical);
39395 - atomic_add(bytes_rendered, &dev->bytes_rendered);
39396 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39397 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39398 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39399 end_cycles = get_cycles();
39400 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39401 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39402 >> 10)), /* Kcycles */
39403 &dev->cpu_kcycles_used);
39404 }
39405 @@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
39406 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39407 struct dlfb_data *dev = fb_info->par;
39408 return snprintf(buf, PAGE_SIZE, "%u\n",
39409 - atomic_read(&dev->bytes_rendered));
39410 + atomic_read_unchecked(&dev->bytes_rendered));
39411 }
39412
39413 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39414 @@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39415 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39416 struct dlfb_data *dev = fb_info->par;
39417 return snprintf(buf, PAGE_SIZE, "%u\n",
39418 - atomic_read(&dev->bytes_identical));
39419 + atomic_read_unchecked(&dev->bytes_identical));
39420 }
39421
39422 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39423 @@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39424 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39425 struct dlfb_data *dev = fb_info->par;
39426 return snprintf(buf, PAGE_SIZE, "%u\n",
39427 - atomic_read(&dev->bytes_sent));
39428 + atomic_read_unchecked(&dev->bytes_sent));
39429 }
39430
39431 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39432 @@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39433 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39434 struct dlfb_data *dev = fb_info->par;
39435 return snprintf(buf, PAGE_SIZE, "%u\n",
39436 - atomic_read(&dev->cpu_kcycles_used));
39437 + atomic_read_unchecked(&dev->cpu_kcycles_used));
39438 }
39439
39440 static ssize_t edid_show(
39441 @@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
39442 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39443 struct dlfb_data *dev = fb_info->par;
39444
39445 - atomic_set(&dev->bytes_rendered, 0);
39446 - atomic_set(&dev->bytes_identical, 0);
39447 - atomic_set(&dev->bytes_sent, 0);
39448 - atomic_set(&dev->cpu_kcycles_used, 0);
39449 + atomic_set_unchecked(&dev->bytes_rendered, 0);
39450 + atomic_set_unchecked(&dev->bytes_identical, 0);
39451 + atomic_set_unchecked(&dev->bytes_sent, 0);
39452 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39453
39454 return count;
39455 }
39456 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
39457 index 7f8472c..9842e87 100644
39458 --- a/drivers/video/uvesafb.c
39459 +++ b/drivers/video/uvesafb.c
39460 @@ -19,6 +19,7 @@
39461 #include <linux/io.h>
39462 #include <linux/mutex.h>
39463 #include <linux/slab.h>
39464 +#include <linux/moduleloader.h>
39465 #include <video/edid.h>
39466 #include <video/uvesafb.h>
39467 #ifdef CONFIG_X86
39468 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39469 NULL,
39470 };
39471
39472 - return call_usermodehelper(v86d_path, argv, envp, 1);
39473 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39474 }
39475
39476 /*
39477 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
39478 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39479 par->pmi_setpal = par->ypan = 0;
39480 } else {
39481 +
39482 +#ifdef CONFIG_PAX_KERNEXEC
39483 +#ifdef CONFIG_MODULES
39484 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39485 +#endif
39486 + if (!par->pmi_code) {
39487 + par->pmi_setpal = par->ypan = 0;
39488 + return 0;
39489 + }
39490 +#endif
39491 +
39492 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39493 + task->t.regs.edi);
39494 +
39495 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39496 + pax_open_kernel();
39497 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39498 + pax_close_kernel();
39499 +
39500 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39501 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39502 +#else
39503 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39504 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39505 +#endif
39506 +
39507 printk(KERN_INFO "uvesafb: protected mode interface info at "
39508 "%04x:%04x\n",
39509 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39510 @@ -1821,6 +1844,11 @@ out:
39511 if (par->vbe_modes)
39512 kfree(par->vbe_modes);
39513
39514 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39515 + if (par->pmi_code)
39516 + module_free_exec(NULL, par->pmi_code);
39517 +#endif
39518 +
39519 framebuffer_release(info);
39520 return err;
39521 }
39522 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
39523 kfree(par->vbe_state_orig);
39524 if (par->vbe_state_saved)
39525 kfree(par->vbe_state_saved);
39526 +
39527 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39528 + if (par->pmi_code)
39529 + module_free_exec(NULL, par->pmi_code);
39530 +#endif
39531 +
39532 }
39533
39534 framebuffer_release(info);
39535 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
39536 index 501b340..86bd4cf 100644
39537 --- a/drivers/video/vesafb.c
39538 +++ b/drivers/video/vesafb.c
39539 @@ -9,6 +9,7 @@
39540 */
39541
39542 #include <linux/module.h>
39543 +#include <linux/moduleloader.h>
39544 #include <linux/kernel.h>
39545 #include <linux/errno.h>
39546 #include <linux/string.h>
39547 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
39548 static int vram_total __initdata; /* Set total amount of memory */
39549 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39550 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39551 -static void (*pmi_start)(void) __read_mostly;
39552 -static void (*pmi_pal) (void) __read_mostly;
39553 +static void (*pmi_start)(void) __read_only;
39554 +static void (*pmi_pal) (void) __read_only;
39555 static int depth __read_mostly;
39556 static int vga_compat __read_mostly;
39557 /* --------------------------------------------------------------------- */
39558 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
39559 unsigned int size_vmode;
39560 unsigned int size_remap;
39561 unsigned int size_total;
39562 + void *pmi_code = NULL;
39563
39564 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39565 return -ENODEV;
39566 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
39567 size_remap = size_total;
39568 vesafb_fix.smem_len = size_remap;
39569
39570 -#ifndef __i386__
39571 - screen_info.vesapm_seg = 0;
39572 -#endif
39573 -
39574 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39575 printk(KERN_WARNING
39576 "vesafb: cannot reserve video memory at 0x%lx\n",
39577 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
39578 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39579 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39580
39581 +#ifdef __i386__
39582 +
39583 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39584 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
39585 + if (!pmi_code)
39586 +#elif !defined(CONFIG_PAX_KERNEXEC)
39587 + if (0)
39588 +#endif
39589 +
39590 +#endif
39591 + screen_info.vesapm_seg = 0;
39592 +
39593 if (screen_info.vesapm_seg) {
39594 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39595 - screen_info.vesapm_seg,screen_info.vesapm_off);
39596 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39597 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39598 }
39599
39600 if (screen_info.vesapm_seg < 0xc000)
39601 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
39602
39603 if (ypan || pmi_setpal) {
39604 unsigned short *pmi_base;
39605 +
39606 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39607 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39608 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39609 +
39610 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39611 + pax_open_kernel();
39612 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39613 +#else
39614 + pmi_code = pmi_base;
39615 +#endif
39616 +
39617 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39618 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39619 +
39620 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39621 + pmi_start = ktva_ktla(pmi_start);
39622 + pmi_pal = ktva_ktla(pmi_pal);
39623 + pax_close_kernel();
39624 +#endif
39625 +
39626 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39627 if (pmi_base[3]) {
39628 printk(KERN_INFO "vesafb: pmi: ports = ");
39629 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
39630 info->node, info->fix.id);
39631 return 0;
39632 err:
39633 +
39634 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39635 + module_free_exec(NULL, pmi_code);
39636 +#endif
39637 +
39638 if (info->screen_base)
39639 iounmap(info->screen_base);
39640 framebuffer_release(info);
39641 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
39642 index 88714ae..16c2e11 100644
39643 --- a/drivers/video/via/via_clock.h
39644 +++ b/drivers/video/via/via_clock.h
39645 @@ -56,7 +56,7 @@ struct via_clock {
39646
39647 void (*set_engine_pll_state)(u8 state);
39648 void (*set_engine_pll)(struct via_pll_config config);
39649 -};
39650 +} __no_const;
39651
39652
39653 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39654 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
39655 index e56c934..fc22f4b 100644
39656 --- a/drivers/xen/xen-pciback/conf_space.h
39657 +++ b/drivers/xen/xen-pciback/conf_space.h
39658 @@ -44,15 +44,15 @@ struct config_field {
39659 struct {
39660 conf_dword_write write;
39661 conf_dword_read read;
39662 - } dw;
39663 + } __no_const dw;
39664 struct {
39665 conf_word_write write;
39666 conf_word_read read;
39667 - } w;
39668 + } __no_const w;
39669 struct {
39670 conf_byte_write write;
39671 conf_byte_read read;
39672 - } b;
39673 + } __no_const b;
39674 } u;
39675 struct list_head list;
39676 };
39677 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
39678 index 879ed88..bc03a01 100644
39679 --- a/fs/9p/vfs_inode.c
39680 +++ b/fs/9p/vfs_inode.c
39681 @@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
39682 void
39683 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39684 {
39685 - char *s = nd_get_link(nd);
39686 + const char *s = nd_get_link(nd);
39687
39688 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39689 IS_ERR(s) ? "<error>" : s);
39690 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
39691 index 79e2ca7..5828ad1 100644
39692 --- a/fs/Kconfig.binfmt
39693 +++ b/fs/Kconfig.binfmt
39694 @@ -86,7 +86,7 @@ config HAVE_AOUT
39695
39696 config BINFMT_AOUT
39697 tristate "Kernel support for a.out and ECOFF binaries"
39698 - depends on HAVE_AOUT
39699 + depends on HAVE_AOUT && BROKEN
39700 ---help---
39701 A.out (Assembler.OUTput) is a set of formats for libraries and
39702 executables used in the earliest versions of UNIX. Linux used
39703 diff --git a/fs/aio.c b/fs/aio.c
39704 index 969beb0..09fab51 100644
39705 --- a/fs/aio.c
39706 +++ b/fs/aio.c
39707 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
39708 size += sizeof(struct io_event) * nr_events;
39709 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39710
39711 - if (nr_pages < 0)
39712 + if (nr_pages <= 0)
39713 return -EINVAL;
39714
39715 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39716 @@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
39717 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39718 {
39719 ssize_t ret;
39720 + struct iovec iovstack;
39721
39722 #ifdef CONFIG_COMPAT
39723 if (compat)
39724 ret = compat_rw_copy_check_uvector(type,
39725 (struct compat_iovec __user *)kiocb->ki_buf,
39726 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39727 + kiocb->ki_nbytes, 1, &iovstack,
39728 &kiocb->ki_iovec, 1);
39729 else
39730 #endif
39731 ret = rw_copy_check_uvector(type,
39732 (struct iovec __user *)kiocb->ki_buf,
39733 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39734 + kiocb->ki_nbytes, 1, &iovstack,
39735 &kiocb->ki_iovec, 1);
39736 if (ret < 0)
39737 goto out;
39738
39739 + if (kiocb->ki_iovec == &iovstack) {
39740 + kiocb->ki_inline_vec = iovstack;
39741 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
39742 + }
39743 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39744 kiocb->ki_cur_seg = 0;
39745 /* ki_nbytes/left now reflect bytes instead of segs */
39746 diff --git a/fs/attr.c b/fs/attr.c
39747 index 7ee7ba4..0c61a60 100644
39748 --- a/fs/attr.c
39749 +++ b/fs/attr.c
39750 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
39751 unsigned long limit;
39752
39753 limit = rlimit(RLIMIT_FSIZE);
39754 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39755 if (limit != RLIM_INFINITY && offset > limit)
39756 goto out_sig;
39757 if (offset > inode->i_sb->s_maxbytes)
39758 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
39759 index e1fbdee..cd5ea56 100644
39760 --- a/fs/autofs4/waitq.c
39761 +++ b/fs/autofs4/waitq.c
39762 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
39763 {
39764 unsigned long sigpipe, flags;
39765 mm_segment_t fs;
39766 - const char *data = (const char *)addr;
39767 + const char __user *data = (const char __force_user *)addr;
39768 ssize_t wr = 0;
39769
39770 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39771 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
39772 index 8342ca6..82fd192 100644
39773 --- a/fs/befs/linuxvfs.c
39774 +++ b/fs/befs/linuxvfs.c
39775 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39776 {
39777 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39778 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39779 - char *link = nd_get_link(nd);
39780 + const char *link = nd_get_link(nd);
39781 if (!IS_ERR(link))
39782 kfree(link);
39783 }
39784 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
39785 index a6395bd..a5b24c4 100644
39786 --- a/fs/binfmt_aout.c
39787 +++ b/fs/binfmt_aout.c
39788 @@ -16,6 +16,7 @@
39789 #include <linux/string.h>
39790 #include <linux/fs.h>
39791 #include <linux/file.h>
39792 +#include <linux/security.h>
39793 #include <linux/stat.h>
39794 #include <linux/fcntl.h>
39795 #include <linux/ptrace.h>
39796 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
39797 #endif
39798 # define START_STACK(u) ((void __user *)u.start_stack)
39799
39800 + memset(&dump, 0, sizeof(dump));
39801 +
39802 fs = get_fs();
39803 set_fs(KERNEL_DS);
39804 has_dumped = 1;
39805 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
39806
39807 /* If the size of the dump file exceeds the rlimit, then see what would happen
39808 if we wrote the stack, but not the data area. */
39809 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39810 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39811 dump.u_dsize = 0;
39812
39813 /* Make sure we have enough room to write the stack and data areas. */
39814 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39815 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39816 dump.u_ssize = 0;
39817
39818 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39819 rlim = rlimit(RLIMIT_DATA);
39820 if (rlim >= RLIM_INFINITY)
39821 rlim = ~0;
39822 +
39823 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39824 if (ex.a_data + ex.a_bss > rlim)
39825 return -ENOMEM;
39826
39827 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39828 install_exec_creds(bprm);
39829 current->flags &= ~PF_FORKNOEXEC;
39830
39831 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39832 + current->mm->pax_flags = 0UL;
39833 +#endif
39834 +
39835 +#ifdef CONFIG_PAX_PAGEEXEC
39836 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39837 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39838 +
39839 +#ifdef CONFIG_PAX_EMUTRAMP
39840 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39841 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39842 +#endif
39843 +
39844 +#ifdef CONFIG_PAX_MPROTECT
39845 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39846 + current->mm->pax_flags |= MF_PAX_MPROTECT;
39847 +#endif
39848 +
39849 + }
39850 +#endif
39851 +
39852 if (N_MAGIC(ex) == OMAGIC) {
39853 unsigned long text_addr, map_size;
39854 loff_t pos;
39855 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39856
39857 down_write(&current->mm->mmap_sem);
39858 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39859 - PROT_READ | PROT_WRITE | PROT_EXEC,
39860 + PROT_READ | PROT_WRITE,
39861 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39862 fd_offset + ex.a_text);
39863 up_write(&current->mm->mmap_sem);
39864 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
39865 index 21ac5ee..dbf63ee 100644
39866 --- a/fs/binfmt_elf.c
39867 +++ b/fs/binfmt_elf.c
39868 @@ -32,6 +32,7 @@
39869 #include <linux/elf.h>
39870 #include <linux/utsname.h>
39871 #include <linux/coredump.h>
39872 +#include <linux/xattr.h>
39873 #include <asm/uaccess.h>
39874 #include <asm/param.h>
39875 #include <asm/page.h>
39876 @@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
39877 #define elf_core_dump NULL
39878 #endif
39879
39880 +#ifdef CONFIG_PAX_MPROTECT
39881 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39882 +#endif
39883 +
39884 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39885 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39886 #else
39887 @@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
39888 .load_binary = load_elf_binary,
39889 .load_shlib = load_elf_library,
39890 .core_dump = elf_core_dump,
39891 +
39892 +#ifdef CONFIG_PAX_MPROTECT
39893 + .handle_mprotect= elf_handle_mprotect,
39894 +#endif
39895 +
39896 .min_coredump = ELF_EXEC_PAGESIZE,
39897 };
39898
39899 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
39900
39901 static int set_brk(unsigned long start, unsigned long end)
39902 {
39903 + unsigned long e = end;
39904 +
39905 start = ELF_PAGEALIGN(start);
39906 end = ELF_PAGEALIGN(end);
39907 if (end > start) {
39908 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
39909 if (BAD_ADDR(addr))
39910 return addr;
39911 }
39912 - current->mm->start_brk = current->mm->brk = end;
39913 + current->mm->start_brk = current->mm->brk = e;
39914 return 0;
39915 }
39916
39917 @@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39918 elf_addr_t __user *u_rand_bytes;
39919 const char *k_platform = ELF_PLATFORM;
39920 const char *k_base_platform = ELF_BASE_PLATFORM;
39921 - unsigned char k_rand_bytes[16];
39922 + u32 k_rand_bytes[4];
39923 int items;
39924 elf_addr_t *elf_info;
39925 int ei_index = 0;
39926 const struct cred *cred = current_cred();
39927 struct vm_area_struct *vma;
39928 + unsigned long saved_auxv[AT_VECTOR_SIZE];
39929
39930 /*
39931 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39932 @@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39933 * Generate 16 random bytes for userspace PRNG seeding.
39934 */
39935 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39936 - u_rand_bytes = (elf_addr_t __user *)
39937 - STACK_ALLOC(p, sizeof(k_rand_bytes));
39938 + srandom32(k_rand_bytes[0] ^ random32());
39939 + srandom32(k_rand_bytes[1] ^ random32());
39940 + srandom32(k_rand_bytes[2] ^ random32());
39941 + srandom32(k_rand_bytes[3] ^ random32());
39942 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
39943 + u_rand_bytes = (elf_addr_t __user *) p;
39944 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39945 return -EFAULT;
39946
39947 @@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39948 return -EFAULT;
39949 current->mm->env_end = p;
39950
39951 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39952 +
39953 /* Put the elf_info on the stack in the right place. */
39954 sp = (elf_addr_t __user *)envp + 1;
39955 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39956 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39957 return -EFAULT;
39958 return 0;
39959 }
39960 @@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39961 {
39962 struct elf_phdr *elf_phdata;
39963 struct elf_phdr *eppnt;
39964 - unsigned long load_addr = 0;
39965 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39966 int load_addr_set = 0;
39967 unsigned long last_bss = 0, elf_bss = 0;
39968 - unsigned long error = ~0UL;
39969 + unsigned long error = -EINVAL;
39970 unsigned long total_size;
39971 int retval, i, size;
39972
39973 @@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39974 goto out_close;
39975 }
39976
39977 +#ifdef CONFIG_PAX_SEGMEXEC
39978 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39979 + pax_task_size = SEGMEXEC_TASK_SIZE;
39980 +#endif
39981 +
39982 eppnt = elf_phdata;
39983 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39984 if (eppnt->p_type == PT_LOAD) {
39985 @@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39986 k = load_addr + eppnt->p_vaddr;
39987 if (BAD_ADDR(k) ||
39988 eppnt->p_filesz > eppnt->p_memsz ||
39989 - eppnt->p_memsz > TASK_SIZE ||
39990 - TASK_SIZE - eppnt->p_memsz < k) {
39991 + eppnt->p_memsz > pax_task_size ||
39992 + pax_task_size - eppnt->p_memsz < k) {
39993 error = -ENOMEM;
39994 goto out_close;
39995 }
39996 @@ -528,6 +552,351 @@ out:
39997 return error;
39998 }
39999
40000 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40001 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
40002 +{
40003 + unsigned long pax_flags = 0UL;
40004 +
40005 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
40006 +
40007 +#ifdef CONFIG_PAX_PAGEEXEC
40008 + if (elf_phdata->p_flags & PF_PAGEEXEC)
40009 + pax_flags |= MF_PAX_PAGEEXEC;
40010 +#endif
40011 +
40012 +#ifdef CONFIG_PAX_SEGMEXEC
40013 + if (elf_phdata->p_flags & PF_SEGMEXEC)
40014 + pax_flags |= MF_PAX_SEGMEXEC;
40015 +#endif
40016 +
40017 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40018 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40019 + if ((__supported_pte_mask & _PAGE_NX))
40020 + pax_flags &= ~MF_PAX_SEGMEXEC;
40021 + else
40022 + pax_flags &= ~MF_PAX_PAGEEXEC;
40023 + }
40024 +#endif
40025 +
40026 +#ifdef CONFIG_PAX_EMUTRAMP
40027 + if (elf_phdata->p_flags & PF_EMUTRAMP)
40028 + pax_flags |= MF_PAX_EMUTRAMP;
40029 +#endif
40030 +
40031 +#ifdef CONFIG_PAX_MPROTECT
40032 + if (elf_phdata->p_flags & PF_MPROTECT)
40033 + pax_flags |= MF_PAX_MPROTECT;
40034 +#endif
40035 +
40036 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40037 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
40038 + pax_flags |= MF_PAX_RANDMMAP;
40039 +#endif
40040 +
40041 +#endif
40042 +
40043 + return pax_flags;
40044 +}
40045 +
40046 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
40047 +{
40048 + unsigned long pax_flags = 0UL;
40049 +
40050 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
40051 +
40052 +#ifdef CONFIG_PAX_PAGEEXEC
40053 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
40054 + pax_flags |= MF_PAX_PAGEEXEC;
40055 +#endif
40056 +
40057 +#ifdef CONFIG_PAX_SEGMEXEC
40058 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
40059 + pax_flags |= MF_PAX_SEGMEXEC;
40060 +#endif
40061 +
40062 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40063 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40064 + if ((__supported_pte_mask & _PAGE_NX))
40065 + pax_flags &= ~MF_PAX_SEGMEXEC;
40066 + else
40067 + pax_flags &= ~MF_PAX_PAGEEXEC;
40068 + }
40069 +#endif
40070 +
40071 +#ifdef CONFIG_PAX_EMUTRAMP
40072 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
40073 + pax_flags |= MF_PAX_EMUTRAMP;
40074 +#endif
40075 +
40076 +#ifdef CONFIG_PAX_MPROTECT
40077 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
40078 + pax_flags |= MF_PAX_MPROTECT;
40079 +#endif
40080 +
40081 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40082 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
40083 + pax_flags |= MF_PAX_RANDMMAP;
40084 +#endif
40085 +
40086 +#endif
40087 +
40088 + return pax_flags;
40089 +}
40090 +
40091 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
40092 +{
40093 + unsigned long pax_flags = 0UL;
40094 +
40095 +#ifdef CONFIG_PAX_EI_PAX
40096 +
40097 +#ifdef CONFIG_PAX_PAGEEXEC
40098 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
40099 + pax_flags |= MF_PAX_PAGEEXEC;
40100 +#endif
40101 +
40102 +#ifdef CONFIG_PAX_SEGMEXEC
40103 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
40104 + pax_flags |= MF_PAX_SEGMEXEC;
40105 +#endif
40106 +
40107 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40108 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40109 + if ((__supported_pte_mask & _PAGE_NX))
40110 + pax_flags &= ~MF_PAX_SEGMEXEC;
40111 + else
40112 + pax_flags &= ~MF_PAX_PAGEEXEC;
40113 + }
40114 +#endif
40115 +
40116 +#ifdef CONFIG_PAX_EMUTRAMP
40117 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
40118 + pax_flags |= MF_PAX_EMUTRAMP;
40119 +#endif
40120 +
40121 +#ifdef CONFIG_PAX_MPROTECT
40122 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
40123 + pax_flags |= MF_PAX_MPROTECT;
40124 +#endif
40125 +
40126 +#ifdef CONFIG_PAX_ASLR
40127 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
40128 + pax_flags |= MF_PAX_RANDMMAP;
40129 +#endif
40130 +
40131 +#else
40132 +
40133 +#ifdef CONFIG_PAX_PAGEEXEC
40134 + pax_flags |= MF_PAX_PAGEEXEC;
40135 +#endif
40136 +
40137 +#ifdef CONFIG_PAX_MPROTECT
40138 + pax_flags |= MF_PAX_MPROTECT;
40139 +#endif
40140 +
40141 +#ifdef CONFIG_PAX_RANDMMAP
40142 + pax_flags |= MF_PAX_RANDMMAP;
40143 +#endif
40144 +
40145 +#ifdef CONFIG_PAX_SEGMEXEC
40146 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
40147 + pax_flags &= ~MF_PAX_PAGEEXEC;
40148 + pax_flags |= MF_PAX_SEGMEXEC;
40149 + }
40150 +#endif
40151 +
40152 +#endif
40153 +
40154 + return pax_flags;
40155 +}
40156 +
40157 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
40158 +{
40159 +
40160 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
40161 + unsigned long i;
40162 +
40163 + for (i = 0UL; i < elf_ex->e_phnum; i++)
40164 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
40165 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
40166 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
40167 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
40168 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
40169 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
40170 + return ~0UL;
40171 +
40172 +#ifdef CONFIG_PAX_SOFTMODE
40173 + if (pax_softmode)
40174 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
40175 + else
40176 +#endif
40177 +
40178 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
40179 + break;
40180 + }
40181 +#endif
40182 +
40183 + return ~0UL;
40184 +}
40185 +
40186 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
40187 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
40188 +{
40189 + unsigned long pax_flags = 0UL;
40190 +
40191 +#ifdef CONFIG_PAX_PAGEEXEC
40192 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
40193 + pax_flags |= MF_PAX_PAGEEXEC;
40194 +#endif
40195 +
40196 +#ifdef CONFIG_PAX_SEGMEXEC
40197 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
40198 + pax_flags |= MF_PAX_SEGMEXEC;
40199 +#endif
40200 +
40201 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40202 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40203 + if ((__supported_pte_mask & _PAGE_NX))
40204 + pax_flags &= ~MF_PAX_SEGMEXEC;
40205 + else
40206 + pax_flags &= ~MF_PAX_PAGEEXEC;
40207 + }
40208 +#endif
40209 +
40210 +#ifdef CONFIG_PAX_EMUTRAMP
40211 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
40212 + pax_flags |= MF_PAX_EMUTRAMP;
40213 +#endif
40214 +
40215 +#ifdef CONFIG_PAX_MPROTECT
40216 + if (pax_flags_softmode & MF_PAX_MPROTECT)
40217 + pax_flags |= MF_PAX_MPROTECT;
40218 +#endif
40219 +
40220 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40221 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
40222 + pax_flags |= MF_PAX_RANDMMAP;
40223 +#endif
40224 +
40225 + return pax_flags;
40226 +}
40227 +
40228 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
40229 +{
40230 + unsigned long pax_flags = 0UL;
40231 +
40232 +#ifdef CONFIG_PAX_PAGEEXEC
40233 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
40234 + pax_flags |= MF_PAX_PAGEEXEC;
40235 +#endif
40236 +
40237 +#ifdef CONFIG_PAX_SEGMEXEC
40238 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
40239 + pax_flags |= MF_PAX_SEGMEXEC;
40240 +#endif
40241 +
40242 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40243 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40244 + if ((__supported_pte_mask & _PAGE_NX))
40245 + pax_flags &= ~MF_PAX_SEGMEXEC;
40246 + else
40247 + pax_flags &= ~MF_PAX_PAGEEXEC;
40248 + }
40249 +#endif
40250 +
40251 +#ifdef CONFIG_PAX_EMUTRAMP
40252 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
40253 + pax_flags |= MF_PAX_EMUTRAMP;
40254 +#endif
40255 +
40256 +#ifdef CONFIG_PAX_MPROTECT
40257 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
40258 + pax_flags |= MF_PAX_MPROTECT;
40259 +#endif
40260 +
40261 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40262 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
40263 + pax_flags |= MF_PAX_RANDMMAP;
40264 +#endif
40265 +
40266 + return pax_flags;
40267 +}
40268 +#endif
40269 +
40270 +static unsigned long pax_parse_xattr_pax(struct file * const file)
40271 +{
40272 +
40273 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
40274 + ssize_t xattr_size, i;
40275 + unsigned char xattr_value[5];
40276 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
40277 +
40278 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
40279 + if (xattr_size <= 0)
40280 + return ~0UL;
40281 +
40282 + for (i = 0; i < xattr_size; i++)
40283 + switch (xattr_value[i]) {
40284 + default:
40285 + return ~0UL;
40286 +
40287 +#define parse_flag(option1, option2, flag) \
40288 + case option1: \
40289 + pax_flags_hardmode |= MF_PAX_##flag; \
40290 + break; \
40291 + case option2: \
40292 + pax_flags_softmode |= MF_PAX_##flag; \
40293 + break;
40294 +
40295 + parse_flag('p', 'P', PAGEEXEC);
40296 + parse_flag('e', 'E', EMUTRAMP);
40297 + parse_flag('m', 'M', MPROTECT);
40298 + parse_flag('r', 'R', RANDMMAP);
40299 + parse_flag('s', 'S', SEGMEXEC);
40300 +
40301 +#undef parse_flag
40302 + }
40303 +
40304 + if (pax_flags_hardmode & pax_flags_softmode)
40305 + return ~0UL;
40306 +
40307 +#ifdef CONFIG_PAX_SOFTMODE
40308 + if (pax_softmode)
40309 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
40310 + else
40311 +#endif
40312 +
40313 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
40314 +#else
40315 + return ~0UL;
40316 +#endif
40317 +
40318 +}
40319 +
40320 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
40321 +{
40322 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
40323 +
40324 + pax_flags = pax_parse_ei_pax(elf_ex);
40325 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
40326 + xattr_pax_flags = pax_parse_xattr_pax(file);
40327 +
40328 + if (pt_pax_flags == ~0UL)
40329 + pt_pax_flags = xattr_pax_flags;
40330 + else if (xattr_pax_flags == ~0UL)
40331 + xattr_pax_flags = pt_pax_flags;
40332 + if (pt_pax_flags != xattr_pax_flags)
40333 + return -EINVAL;
40334 + if (pt_pax_flags != ~0UL)
40335 + pax_flags = pt_pax_flags;
40336 +
40337 + if (0 > pax_check_flags(&pax_flags))
40338 + return -EINVAL;
40339 +
40340 + current->mm->pax_flags = pax_flags;
40341 + return 0;
40342 +}
40343 +#endif
40344 +
40345 /*
40346 * These are the functions used to load ELF style executables and shared
40347 * libraries. There is no binary dependent code anywhere else.
40348 @@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
40349 {
40350 unsigned int random_variable = 0;
40351
40352 +#ifdef CONFIG_PAX_RANDUSTACK
40353 + if (randomize_va_space)
40354 + return stack_top - current->mm->delta_stack;
40355 +#endif
40356 +
40357 if ((current->flags & PF_RANDOMIZE) &&
40358 !(current->personality & ADDR_NO_RANDOMIZE)) {
40359 random_variable = get_random_int() & STACK_RND_MASK;
40360 @@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40361 unsigned long load_addr = 0, load_bias = 0;
40362 int load_addr_set = 0;
40363 char * elf_interpreter = NULL;
40364 - unsigned long error;
40365 + unsigned long error = 0;
40366 struct elf_phdr *elf_ppnt, *elf_phdata;
40367 unsigned long elf_bss, elf_brk;
40368 int retval, i;
40369 @@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40370 unsigned long start_code, end_code, start_data, end_data;
40371 unsigned long reloc_func_desc __maybe_unused = 0;
40372 int executable_stack = EXSTACK_DEFAULT;
40373 - unsigned long def_flags = 0;
40374 struct {
40375 struct elfhdr elf_ex;
40376 struct elfhdr interp_elf_ex;
40377 } *loc;
40378 + unsigned long pax_task_size = TASK_SIZE;
40379
40380 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40381 if (!loc) {
40382 @@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40383
40384 /* OK, This is the point of no return */
40385 current->flags &= ~PF_FORKNOEXEC;
40386 - current->mm->def_flags = def_flags;
40387 +
40388 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40389 + current->mm->pax_flags = 0UL;
40390 +#endif
40391 +
40392 +#ifdef CONFIG_PAX_DLRESOLVE
40393 + current->mm->call_dl_resolve = 0UL;
40394 +#endif
40395 +
40396 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40397 + current->mm->call_syscall = 0UL;
40398 +#endif
40399 +
40400 +#ifdef CONFIG_PAX_ASLR
40401 + current->mm->delta_mmap = 0UL;
40402 + current->mm->delta_stack = 0UL;
40403 +#endif
40404 +
40405 + current->mm->def_flags = 0;
40406 +
40407 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40408 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
40409 + send_sig(SIGKILL, current, 0);
40410 + goto out_free_dentry;
40411 + }
40412 +#endif
40413 +
40414 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40415 + pax_set_initial_flags(bprm);
40416 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40417 + if (pax_set_initial_flags_func)
40418 + (pax_set_initial_flags_func)(bprm);
40419 +#endif
40420 +
40421 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40422 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40423 + current->mm->context.user_cs_limit = PAGE_SIZE;
40424 + current->mm->def_flags |= VM_PAGEEXEC;
40425 + }
40426 +#endif
40427 +
40428 +#ifdef CONFIG_PAX_SEGMEXEC
40429 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40430 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40431 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40432 + pax_task_size = SEGMEXEC_TASK_SIZE;
40433 + current->mm->def_flags |= VM_NOHUGEPAGE;
40434 + }
40435 +#endif
40436 +
40437 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40438 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40439 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40440 + put_cpu();
40441 + }
40442 +#endif
40443
40444 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40445 may depend on the personality. */
40446 SET_PERSONALITY(loc->elf_ex);
40447 +
40448 +#ifdef CONFIG_PAX_ASLR
40449 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40450 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40451 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40452 + }
40453 +#endif
40454 +
40455 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40456 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40457 + executable_stack = EXSTACK_DISABLE_X;
40458 + current->personality &= ~READ_IMPLIES_EXEC;
40459 + } else
40460 +#endif
40461 +
40462 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40463 current->personality |= READ_IMPLIES_EXEC;
40464
40465 @@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40466 #else
40467 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40468 #endif
40469 +
40470 +#ifdef CONFIG_PAX_RANDMMAP
40471 + /* PaX: randomize base address at the default exe base if requested */
40472 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40473 +#ifdef CONFIG_SPARC64
40474 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40475 +#else
40476 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40477 +#endif
40478 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40479 + elf_flags |= MAP_FIXED;
40480 + }
40481 +#endif
40482 +
40483 }
40484
40485 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40486 @@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40487 * allowed task size. Note that p_filesz must always be
40488 * <= p_memsz so it is only necessary to check p_memsz.
40489 */
40490 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40491 - elf_ppnt->p_memsz > TASK_SIZE ||
40492 - TASK_SIZE - elf_ppnt->p_memsz < k) {
40493 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40494 + elf_ppnt->p_memsz > pax_task_size ||
40495 + pax_task_size - elf_ppnt->p_memsz < k) {
40496 /* set_brk can never work. Avoid overflows. */
40497 send_sig(SIGKILL, current, 0);
40498 retval = -EINVAL;
40499 @@ -870,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40500 start_data += load_bias;
40501 end_data += load_bias;
40502
40503 +#ifdef CONFIG_PAX_RANDMMAP
40504 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40505 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40506 +#endif
40507 +
40508 /* Calling set_brk effectively mmaps the pages that we need
40509 * for the bss and break sections. We must do this before
40510 * mapping in the interpreter, to make sure it doesn't wind
40511 @@ -881,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40512 goto out_free_dentry;
40513 }
40514 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40515 - send_sig(SIGSEGV, current, 0);
40516 - retval = -EFAULT; /* Nobody gets to see this, but.. */
40517 - goto out_free_dentry;
40518 + /*
40519 + * This bss-zeroing can fail if the ELF
40520 + * file specifies odd protections. So
40521 + * we don't check the return value
40522 + */
40523 }
40524
40525 if (elf_interpreter) {
40526 @@ -1098,7 +1563,7 @@ out:
40527 * Decide what to dump of a segment, part, all or none.
40528 */
40529 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40530 - unsigned long mm_flags)
40531 + unsigned long mm_flags, long signr)
40532 {
40533 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40534
40535 @@ -1132,7 +1597,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
40536 if (vma->vm_file == NULL)
40537 return 0;
40538
40539 - if (FILTER(MAPPED_PRIVATE))
40540 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40541 goto whole;
40542
40543 /*
40544 @@ -1354,9 +1819,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
40545 {
40546 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40547 int i = 0;
40548 - do
40549 + do {
40550 i += 2;
40551 - while (auxv[i - 2] != AT_NULL);
40552 + } while (auxv[i - 2] != AT_NULL);
40553 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40554 }
40555
40556 @@ -1421,7 +1886,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
40557 for (i = 1; i < view->n; ++i) {
40558 const struct user_regset *regset = &view->regsets[i];
40559 do_thread_regset_writeback(t->task, regset);
40560 - if (regset->core_note_type &&
40561 + if (regset->core_note_type && regset->get &&
40562 (!regset->active || regset->active(t->task, regset))) {
40563 int ret;
40564 size_t size = regset->n * regset->size;
40565 @@ -1862,14 +2327,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
40566 }
40567
40568 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40569 - unsigned long mm_flags)
40570 + struct coredump_params *cprm)
40571 {
40572 struct vm_area_struct *vma;
40573 size_t size = 0;
40574
40575 for (vma = first_vma(current, gate_vma); vma != NULL;
40576 vma = next_vma(vma, gate_vma))
40577 - size += vma_dump_size(vma, mm_flags);
40578 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40579 return size;
40580 }
40581
40582 @@ -1963,7 +2428,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40583
40584 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40585
40586 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40587 + offset += elf_core_vma_data_size(gate_vma, cprm);
40588 offset += elf_core_extra_data_size();
40589 e_shoff = offset;
40590
40591 @@ -1977,10 +2442,12 @@ static int elf_core_dump(struct coredump_params *cprm)
40592 offset = dataoff;
40593
40594 size += sizeof(*elf);
40595 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40596 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40597 goto end_coredump;
40598
40599 size += sizeof(*phdr4note);
40600 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40601 if (size > cprm->limit
40602 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40603 goto end_coredump;
40604 @@ -1994,7 +2461,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40605 phdr.p_offset = offset;
40606 phdr.p_vaddr = vma->vm_start;
40607 phdr.p_paddr = 0;
40608 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40609 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40610 phdr.p_memsz = vma->vm_end - vma->vm_start;
40611 offset += phdr.p_filesz;
40612 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40613 @@ -2005,6 +2472,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40614 phdr.p_align = ELF_EXEC_PAGESIZE;
40615
40616 size += sizeof(phdr);
40617 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40618 if (size > cprm->limit
40619 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40620 goto end_coredump;
40621 @@ -2029,7 +2497,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40622 unsigned long addr;
40623 unsigned long end;
40624
40625 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40626 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40627
40628 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40629 struct page *page;
40630 @@ -2038,6 +2506,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40631 page = get_dump_page(addr);
40632 if (page) {
40633 void *kaddr = kmap(page);
40634 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40635 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40636 !dump_write(cprm->file, kaddr,
40637 PAGE_SIZE);
40638 @@ -2055,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40639
40640 if (e_phnum == PN_XNUM) {
40641 size += sizeof(*shdr4extnum);
40642 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40643 if (size > cprm->limit
40644 || !dump_write(cprm->file, shdr4extnum,
40645 sizeof(*shdr4extnum)))
40646 @@ -2075,6 +2545,97 @@ out:
40647
40648 #endif /* CONFIG_ELF_CORE */
40649
40650 +#ifdef CONFIG_PAX_MPROTECT
40651 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
40652 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40653 + * we'll remove VM_MAYWRITE for good on RELRO segments.
40654 + *
40655 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40656 + * basis because we want to allow the common case and not the special ones.
40657 + */
40658 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40659 +{
40660 + struct elfhdr elf_h;
40661 + struct elf_phdr elf_p;
40662 + unsigned long i;
40663 + unsigned long oldflags;
40664 + bool is_textrel_rw, is_textrel_rx, is_relro;
40665 +
40666 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40667 + return;
40668 +
40669 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40670 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40671 +
40672 +#ifdef CONFIG_PAX_ELFRELOCS
40673 + /* possible TEXTREL */
40674 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40675 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40676 +#else
40677 + is_textrel_rw = false;
40678 + is_textrel_rx = false;
40679 +#endif
40680 +
40681 + /* possible RELRO */
40682 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40683 +
40684 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40685 + return;
40686 +
40687 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40688 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40689 +
40690 +#ifdef CONFIG_PAX_ETEXECRELOCS
40691 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40692 +#else
40693 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40694 +#endif
40695 +
40696 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40697 + !elf_check_arch(&elf_h) ||
40698 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40699 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40700 + return;
40701 +
40702 + for (i = 0UL; i < elf_h.e_phnum; i++) {
40703 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40704 + return;
40705 + switch (elf_p.p_type) {
40706 + case PT_DYNAMIC:
40707 + if (!is_textrel_rw && !is_textrel_rx)
40708 + continue;
40709 + i = 0UL;
40710 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40711 + elf_dyn dyn;
40712 +
40713 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40714 + return;
40715 + if (dyn.d_tag == DT_NULL)
40716 + return;
40717 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40718 + gr_log_textrel(vma);
40719 + if (is_textrel_rw)
40720 + vma->vm_flags |= VM_MAYWRITE;
40721 + else
40722 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40723 + vma->vm_flags &= ~VM_MAYWRITE;
40724 + return;
40725 + }
40726 + i++;
40727 + }
40728 + return;
40729 +
40730 + case PT_GNU_RELRO:
40731 + if (!is_relro)
40732 + continue;
40733 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40734 + vma->vm_flags &= ~VM_MAYWRITE;
40735 + return;
40736 + }
40737 + }
40738 +}
40739 +#endif
40740 +
40741 static int __init init_elf_binfmt(void)
40742 {
40743 return register_binfmt(&elf_format);
40744 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
40745 index 1bffbe0..c8c283e 100644
40746 --- a/fs/binfmt_flat.c
40747 +++ b/fs/binfmt_flat.c
40748 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
40749 realdatastart = (unsigned long) -ENOMEM;
40750 printk("Unable to allocate RAM for process data, errno %d\n",
40751 (int)-realdatastart);
40752 + down_write(&current->mm->mmap_sem);
40753 do_munmap(current->mm, textpos, text_len);
40754 + up_write(&current->mm->mmap_sem);
40755 ret = realdatastart;
40756 goto err;
40757 }
40758 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40759 }
40760 if (IS_ERR_VALUE(result)) {
40761 printk("Unable to read data+bss, errno %d\n", (int)-result);
40762 + down_write(&current->mm->mmap_sem);
40763 do_munmap(current->mm, textpos, text_len);
40764 do_munmap(current->mm, realdatastart, len);
40765 + up_write(&current->mm->mmap_sem);
40766 ret = result;
40767 goto err;
40768 }
40769 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40770 }
40771 if (IS_ERR_VALUE(result)) {
40772 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40773 + down_write(&current->mm->mmap_sem);
40774 do_munmap(current->mm, textpos, text_len + data_len + extra +
40775 MAX_SHARED_LIBS * sizeof(unsigned long));
40776 + up_write(&current->mm->mmap_sem);
40777 ret = result;
40778 goto err;
40779 }
40780 diff --git a/fs/bio.c b/fs/bio.c
40781 index b1fe82c..84da0a9 100644
40782 --- a/fs/bio.c
40783 +++ b/fs/bio.c
40784 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
40785 const int read = bio_data_dir(bio) == READ;
40786 struct bio_map_data *bmd = bio->bi_private;
40787 int i;
40788 - char *p = bmd->sgvecs[0].iov_base;
40789 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40790
40791 __bio_for_each_segment(bvec, bio, i, 0) {
40792 char *addr = page_address(bvec->bv_page);
40793 diff --git a/fs/block_dev.c b/fs/block_dev.c
40794 index b07f1da..9efcb92 100644
40795 --- a/fs/block_dev.c
40796 +++ b/fs/block_dev.c
40797 @@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
40798 else if (bdev->bd_contains == bdev)
40799 return true; /* is a whole device which isn't held */
40800
40801 - else if (whole->bd_holder == bd_may_claim)
40802 + else if (whole->bd_holder == (void *)bd_may_claim)
40803 return true; /* is a partition of a device that is being partitioned */
40804 else if (whole->bd_holder != NULL)
40805 return false; /* is a partition of a held device */
40806 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
40807 index dede441..f2a2507 100644
40808 --- a/fs/btrfs/ctree.c
40809 +++ b/fs/btrfs/ctree.c
40810 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
40811 free_extent_buffer(buf);
40812 add_root_to_dirty_list(root);
40813 } else {
40814 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40815 - parent_start = parent->start;
40816 - else
40817 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40818 + if (parent)
40819 + parent_start = parent->start;
40820 + else
40821 + parent_start = 0;
40822 + } else
40823 parent_start = 0;
40824
40825 WARN_ON(trans->transid != btrfs_header_generation(parent));
40826 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
40827 index fd1a06d..6e9033d 100644
40828 --- a/fs/btrfs/inode.c
40829 +++ b/fs/btrfs/inode.c
40830 @@ -6895,7 +6895,7 @@ fail:
40831 return -ENOMEM;
40832 }
40833
40834 -static int btrfs_getattr(struct vfsmount *mnt,
40835 +int btrfs_getattr(struct vfsmount *mnt,
40836 struct dentry *dentry, struct kstat *stat)
40837 {
40838 struct inode *inode = dentry->d_inode;
40839 @@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
40840 return 0;
40841 }
40842
40843 +EXPORT_SYMBOL(btrfs_getattr);
40844 +
40845 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
40846 +{
40847 + return BTRFS_I(inode)->root->anon_dev;
40848 +}
40849 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40850 +
40851 /*
40852 * If a file is moved, it will inherit the cow and compression flags of the new
40853 * directory.
40854 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
40855 index c04f02c..f5c9e2e 100644
40856 --- a/fs/btrfs/ioctl.c
40857 +++ b/fs/btrfs/ioctl.c
40858 @@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40859 for (i = 0; i < num_types; i++) {
40860 struct btrfs_space_info *tmp;
40861
40862 + /* Don't copy in more than we allocated */
40863 if (!slot_count)
40864 break;
40865
40866 + slot_count--;
40867 +
40868 info = NULL;
40869 rcu_read_lock();
40870 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40871 @@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40872 memcpy(dest, &space, sizeof(space));
40873 dest++;
40874 space_args.total_spaces++;
40875 - slot_count--;
40876 }
40877 - if (!slot_count)
40878 - break;
40879 }
40880 up_read(&info->groups_sem);
40881 }
40882
40883 - user_dest = (struct btrfs_ioctl_space_info *)
40884 + user_dest = (struct btrfs_ioctl_space_info __user *)
40885 (arg + sizeof(struct btrfs_ioctl_space_args));
40886
40887 if (copy_to_user(user_dest, dest_orig, alloc_size))
40888 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
40889 index cfb5543..1ae7347 100644
40890 --- a/fs/btrfs/relocation.c
40891 +++ b/fs/btrfs/relocation.c
40892 @@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
40893 }
40894 spin_unlock(&rc->reloc_root_tree.lock);
40895
40896 - BUG_ON((struct btrfs_root *)node->data != root);
40897 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
40898
40899 if (!del) {
40900 spin_lock(&rc->reloc_root_tree.lock);
40901 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
40902 index 622f469..e8d2d55 100644
40903 --- a/fs/cachefiles/bind.c
40904 +++ b/fs/cachefiles/bind.c
40905 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
40906 args);
40907
40908 /* start by checking things over */
40909 - ASSERT(cache->fstop_percent >= 0 &&
40910 - cache->fstop_percent < cache->fcull_percent &&
40911 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
40912 cache->fcull_percent < cache->frun_percent &&
40913 cache->frun_percent < 100);
40914
40915 - ASSERT(cache->bstop_percent >= 0 &&
40916 - cache->bstop_percent < cache->bcull_percent &&
40917 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
40918 cache->bcull_percent < cache->brun_percent &&
40919 cache->brun_percent < 100);
40920
40921 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
40922 index 0a1467b..6a53245 100644
40923 --- a/fs/cachefiles/daemon.c
40924 +++ b/fs/cachefiles/daemon.c
40925 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
40926 if (n > buflen)
40927 return -EMSGSIZE;
40928
40929 - if (copy_to_user(_buffer, buffer, n) != 0)
40930 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40931 return -EFAULT;
40932
40933 return n;
40934 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
40935 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40936 return -EIO;
40937
40938 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
40939 + if (datalen > PAGE_SIZE - 1)
40940 return -EOPNOTSUPP;
40941
40942 /* drag the command string into the kernel so we can parse it */
40943 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
40944 if (args[0] != '%' || args[1] != '\0')
40945 return -EINVAL;
40946
40947 - if (fstop < 0 || fstop >= cache->fcull_percent)
40948 + if (fstop >= cache->fcull_percent)
40949 return cachefiles_daemon_range_error(cache, args);
40950
40951 cache->fstop_percent = fstop;
40952 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
40953 if (args[0] != '%' || args[1] != '\0')
40954 return -EINVAL;
40955
40956 - if (bstop < 0 || bstop >= cache->bcull_percent)
40957 + if (bstop >= cache->bcull_percent)
40958 return cachefiles_daemon_range_error(cache, args);
40959
40960 cache->bstop_percent = bstop;
40961 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
40962 index bd6bc1b..b627b53 100644
40963 --- a/fs/cachefiles/internal.h
40964 +++ b/fs/cachefiles/internal.h
40965 @@ -57,7 +57,7 @@ struct cachefiles_cache {
40966 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40967 struct rb_root active_nodes; /* active nodes (can't be culled) */
40968 rwlock_t active_lock; /* lock for active_nodes */
40969 - atomic_t gravecounter; /* graveyard uniquifier */
40970 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40971 unsigned frun_percent; /* when to stop culling (% files) */
40972 unsigned fcull_percent; /* when to start culling (% files) */
40973 unsigned fstop_percent; /* when to stop allocating (% files) */
40974 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
40975 * proc.c
40976 */
40977 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40978 -extern atomic_t cachefiles_lookup_histogram[HZ];
40979 -extern atomic_t cachefiles_mkdir_histogram[HZ];
40980 -extern atomic_t cachefiles_create_histogram[HZ];
40981 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40982 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40983 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40984
40985 extern int __init cachefiles_proc_init(void);
40986 extern void cachefiles_proc_cleanup(void);
40987 static inline
40988 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40989 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40990 {
40991 unsigned long jif = jiffies - start_jif;
40992 if (jif >= HZ)
40993 jif = HZ - 1;
40994 - atomic_inc(&histogram[jif]);
40995 + atomic_inc_unchecked(&histogram[jif]);
40996 }
40997
40998 #else
40999 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
41000 index a0358c2..d6137f2 100644
41001 --- a/fs/cachefiles/namei.c
41002 +++ b/fs/cachefiles/namei.c
41003 @@ -318,7 +318,7 @@ try_again:
41004 /* first step is to make up a grave dentry in the graveyard */
41005 sprintf(nbuffer, "%08x%08x",
41006 (uint32_t) get_seconds(),
41007 - (uint32_t) atomic_inc_return(&cache->gravecounter));
41008 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
41009
41010 /* do the multiway lock magic */
41011 trap = lock_rename(cache->graveyard, dir);
41012 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
41013 index eccd339..4c1d995 100644
41014 --- a/fs/cachefiles/proc.c
41015 +++ b/fs/cachefiles/proc.c
41016 @@ -14,9 +14,9 @@
41017 #include <linux/seq_file.h>
41018 #include "internal.h"
41019
41020 -atomic_t cachefiles_lookup_histogram[HZ];
41021 -atomic_t cachefiles_mkdir_histogram[HZ];
41022 -atomic_t cachefiles_create_histogram[HZ];
41023 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
41024 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
41025 +atomic_unchecked_t cachefiles_create_histogram[HZ];
41026
41027 /*
41028 * display the latency histogram
41029 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
41030 return 0;
41031 default:
41032 index = (unsigned long) v - 3;
41033 - x = atomic_read(&cachefiles_lookup_histogram[index]);
41034 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
41035 - z = atomic_read(&cachefiles_create_histogram[index]);
41036 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
41037 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
41038 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
41039 if (x == 0 && y == 0 && z == 0)
41040 return 0;
41041
41042 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
41043 index 0e3c092..818480e 100644
41044 --- a/fs/cachefiles/rdwr.c
41045 +++ b/fs/cachefiles/rdwr.c
41046 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
41047 old_fs = get_fs();
41048 set_fs(KERNEL_DS);
41049 ret = file->f_op->write(
41050 - file, (const void __user *) data, len, &pos);
41051 + file, (const void __force_user *) data, len, &pos);
41052 set_fs(old_fs);
41053 kunmap(page);
41054 if (ret != len)
41055 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
41056 index 9895400..fa40a7d 100644
41057 --- a/fs/ceph/dir.c
41058 +++ b/fs/ceph/dir.c
41059 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
41060 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
41061 struct ceph_mds_client *mdsc = fsc->mdsc;
41062 unsigned frag = fpos_frag(filp->f_pos);
41063 - int off = fpos_off(filp->f_pos);
41064 + unsigned int off = fpos_off(filp->f_pos);
41065 int err;
41066 u32 ftype;
41067 struct ceph_mds_reply_info_parsed *rinfo;
41068 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
41069 index 84e8c07..6170d31 100644
41070 --- a/fs/cifs/cifs_debug.c
41071 +++ b/fs/cifs/cifs_debug.c
41072 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
41073
41074 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
41075 #ifdef CONFIG_CIFS_STATS2
41076 - atomic_set(&totBufAllocCount, 0);
41077 - atomic_set(&totSmBufAllocCount, 0);
41078 + atomic_set_unchecked(&totBufAllocCount, 0);
41079 + atomic_set_unchecked(&totSmBufAllocCount, 0);
41080 #endif /* CONFIG_CIFS_STATS2 */
41081 spin_lock(&cifs_tcp_ses_lock);
41082 list_for_each(tmp1, &cifs_tcp_ses_list) {
41083 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
41084 tcon = list_entry(tmp3,
41085 struct cifs_tcon,
41086 tcon_list);
41087 - atomic_set(&tcon->num_smbs_sent, 0);
41088 - atomic_set(&tcon->num_writes, 0);
41089 - atomic_set(&tcon->num_reads, 0);
41090 - atomic_set(&tcon->num_oplock_brks, 0);
41091 - atomic_set(&tcon->num_opens, 0);
41092 - atomic_set(&tcon->num_posixopens, 0);
41093 - atomic_set(&tcon->num_posixmkdirs, 0);
41094 - atomic_set(&tcon->num_closes, 0);
41095 - atomic_set(&tcon->num_deletes, 0);
41096 - atomic_set(&tcon->num_mkdirs, 0);
41097 - atomic_set(&tcon->num_rmdirs, 0);
41098 - atomic_set(&tcon->num_renames, 0);
41099 - atomic_set(&tcon->num_t2renames, 0);
41100 - atomic_set(&tcon->num_ffirst, 0);
41101 - atomic_set(&tcon->num_fnext, 0);
41102 - atomic_set(&tcon->num_fclose, 0);
41103 - atomic_set(&tcon->num_hardlinks, 0);
41104 - atomic_set(&tcon->num_symlinks, 0);
41105 - atomic_set(&tcon->num_locks, 0);
41106 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
41107 + atomic_set_unchecked(&tcon->num_writes, 0);
41108 + atomic_set_unchecked(&tcon->num_reads, 0);
41109 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
41110 + atomic_set_unchecked(&tcon->num_opens, 0);
41111 + atomic_set_unchecked(&tcon->num_posixopens, 0);
41112 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
41113 + atomic_set_unchecked(&tcon->num_closes, 0);
41114 + atomic_set_unchecked(&tcon->num_deletes, 0);
41115 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
41116 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
41117 + atomic_set_unchecked(&tcon->num_renames, 0);
41118 + atomic_set_unchecked(&tcon->num_t2renames, 0);
41119 + atomic_set_unchecked(&tcon->num_ffirst, 0);
41120 + atomic_set_unchecked(&tcon->num_fnext, 0);
41121 + atomic_set_unchecked(&tcon->num_fclose, 0);
41122 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
41123 + atomic_set_unchecked(&tcon->num_symlinks, 0);
41124 + atomic_set_unchecked(&tcon->num_locks, 0);
41125 }
41126 }
41127 }
41128 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
41129 smBufAllocCount.counter, cifs_min_small);
41130 #ifdef CONFIG_CIFS_STATS2
41131 seq_printf(m, "Total Large %d Small %d Allocations\n",
41132 - atomic_read(&totBufAllocCount),
41133 - atomic_read(&totSmBufAllocCount));
41134 + atomic_read_unchecked(&totBufAllocCount),
41135 + atomic_read_unchecked(&totSmBufAllocCount));
41136 #endif /* CONFIG_CIFS_STATS2 */
41137
41138 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
41139 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
41140 if (tcon->need_reconnect)
41141 seq_puts(m, "\tDISCONNECTED ");
41142 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
41143 - atomic_read(&tcon->num_smbs_sent),
41144 - atomic_read(&tcon->num_oplock_brks));
41145 + atomic_read_unchecked(&tcon->num_smbs_sent),
41146 + atomic_read_unchecked(&tcon->num_oplock_brks));
41147 seq_printf(m, "\nReads: %d Bytes: %lld",
41148 - atomic_read(&tcon->num_reads),
41149 + atomic_read_unchecked(&tcon->num_reads),
41150 (long long)(tcon->bytes_read));
41151 seq_printf(m, "\nWrites: %d Bytes: %lld",
41152 - atomic_read(&tcon->num_writes),
41153 + atomic_read_unchecked(&tcon->num_writes),
41154 (long long)(tcon->bytes_written));
41155 seq_printf(m, "\nFlushes: %d",
41156 - atomic_read(&tcon->num_flushes));
41157 + atomic_read_unchecked(&tcon->num_flushes));
41158 seq_printf(m, "\nLocks: %d HardLinks: %d "
41159 "Symlinks: %d",
41160 - atomic_read(&tcon->num_locks),
41161 - atomic_read(&tcon->num_hardlinks),
41162 - atomic_read(&tcon->num_symlinks));
41163 + atomic_read_unchecked(&tcon->num_locks),
41164 + atomic_read_unchecked(&tcon->num_hardlinks),
41165 + atomic_read_unchecked(&tcon->num_symlinks));
41166 seq_printf(m, "\nOpens: %d Closes: %d "
41167 "Deletes: %d",
41168 - atomic_read(&tcon->num_opens),
41169 - atomic_read(&tcon->num_closes),
41170 - atomic_read(&tcon->num_deletes));
41171 + atomic_read_unchecked(&tcon->num_opens),
41172 + atomic_read_unchecked(&tcon->num_closes),
41173 + atomic_read_unchecked(&tcon->num_deletes));
41174 seq_printf(m, "\nPosix Opens: %d "
41175 "Posix Mkdirs: %d",
41176 - atomic_read(&tcon->num_posixopens),
41177 - atomic_read(&tcon->num_posixmkdirs));
41178 + atomic_read_unchecked(&tcon->num_posixopens),
41179 + atomic_read_unchecked(&tcon->num_posixmkdirs));
41180 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
41181 - atomic_read(&tcon->num_mkdirs),
41182 - atomic_read(&tcon->num_rmdirs));
41183 + atomic_read_unchecked(&tcon->num_mkdirs),
41184 + atomic_read_unchecked(&tcon->num_rmdirs));
41185 seq_printf(m, "\nRenames: %d T2 Renames %d",
41186 - atomic_read(&tcon->num_renames),
41187 - atomic_read(&tcon->num_t2renames));
41188 + atomic_read_unchecked(&tcon->num_renames),
41189 + atomic_read_unchecked(&tcon->num_t2renames));
41190 seq_printf(m, "\nFindFirst: %d FNext %d "
41191 "FClose %d",
41192 - atomic_read(&tcon->num_ffirst),
41193 - atomic_read(&tcon->num_fnext),
41194 - atomic_read(&tcon->num_fclose));
41195 + atomic_read_unchecked(&tcon->num_ffirst),
41196 + atomic_read_unchecked(&tcon->num_fnext),
41197 + atomic_read_unchecked(&tcon->num_fclose));
41198 }
41199 }
41200 }
41201 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
41202 index 8f1fe32..38f9e27 100644
41203 --- a/fs/cifs/cifsfs.c
41204 +++ b/fs/cifs/cifsfs.c
41205 @@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
41206 cifs_req_cachep = kmem_cache_create("cifs_request",
41207 CIFSMaxBufSize +
41208 MAX_CIFS_HDR_SIZE, 0,
41209 - SLAB_HWCACHE_ALIGN, NULL);
41210 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
41211 if (cifs_req_cachep == NULL)
41212 return -ENOMEM;
41213
41214 @@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
41215 efficient to alloc 1 per page off the slab compared to 17K (5page)
41216 alloc of large cifs buffers even when page debugging is on */
41217 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
41218 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
41219 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
41220 NULL);
41221 if (cifs_sm_req_cachep == NULL) {
41222 mempool_destroy(cifs_req_poolp);
41223 @@ -1101,8 +1101,8 @@ init_cifs(void)
41224 atomic_set(&bufAllocCount, 0);
41225 atomic_set(&smBufAllocCount, 0);
41226 #ifdef CONFIG_CIFS_STATS2
41227 - atomic_set(&totBufAllocCount, 0);
41228 - atomic_set(&totSmBufAllocCount, 0);
41229 + atomic_set_unchecked(&totBufAllocCount, 0);
41230 + atomic_set_unchecked(&totSmBufAllocCount, 0);
41231 #endif /* CONFIG_CIFS_STATS2 */
41232
41233 atomic_set(&midCount, 0);
41234 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
41235 index 8238aa1..0347196 100644
41236 --- a/fs/cifs/cifsglob.h
41237 +++ b/fs/cifs/cifsglob.h
41238 @@ -392,28 +392,28 @@ struct cifs_tcon {
41239 __u16 Flags; /* optional support bits */
41240 enum statusEnum tidStatus;
41241 #ifdef CONFIG_CIFS_STATS
41242 - atomic_t num_smbs_sent;
41243 - atomic_t num_writes;
41244 - atomic_t num_reads;
41245 - atomic_t num_flushes;
41246 - atomic_t num_oplock_brks;
41247 - atomic_t num_opens;
41248 - atomic_t num_closes;
41249 - atomic_t num_deletes;
41250 - atomic_t num_mkdirs;
41251 - atomic_t num_posixopens;
41252 - atomic_t num_posixmkdirs;
41253 - atomic_t num_rmdirs;
41254 - atomic_t num_renames;
41255 - atomic_t num_t2renames;
41256 - atomic_t num_ffirst;
41257 - atomic_t num_fnext;
41258 - atomic_t num_fclose;
41259 - atomic_t num_hardlinks;
41260 - atomic_t num_symlinks;
41261 - atomic_t num_locks;
41262 - atomic_t num_acl_get;
41263 - atomic_t num_acl_set;
41264 + atomic_unchecked_t num_smbs_sent;
41265 + atomic_unchecked_t num_writes;
41266 + atomic_unchecked_t num_reads;
41267 + atomic_unchecked_t num_flushes;
41268 + atomic_unchecked_t num_oplock_brks;
41269 + atomic_unchecked_t num_opens;
41270 + atomic_unchecked_t num_closes;
41271 + atomic_unchecked_t num_deletes;
41272 + atomic_unchecked_t num_mkdirs;
41273 + atomic_unchecked_t num_posixopens;
41274 + atomic_unchecked_t num_posixmkdirs;
41275 + atomic_unchecked_t num_rmdirs;
41276 + atomic_unchecked_t num_renames;
41277 + atomic_unchecked_t num_t2renames;
41278 + atomic_unchecked_t num_ffirst;
41279 + atomic_unchecked_t num_fnext;
41280 + atomic_unchecked_t num_fclose;
41281 + atomic_unchecked_t num_hardlinks;
41282 + atomic_unchecked_t num_symlinks;
41283 + atomic_unchecked_t num_locks;
41284 + atomic_unchecked_t num_acl_get;
41285 + atomic_unchecked_t num_acl_set;
41286 #ifdef CONFIG_CIFS_STATS2
41287 unsigned long long time_writes;
41288 unsigned long long time_reads;
41289 @@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
41290 }
41291
41292 #ifdef CONFIG_CIFS_STATS
41293 -#define cifs_stats_inc atomic_inc
41294 +#define cifs_stats_inc atomic_inc_unchecked
41295
41296 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
41297 unsigned int bytes)
41298 @@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
41299 /* Various Debug counters */
41300 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
41301 #ifdef CONFIG_CIFS_STATS2
41302 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
41303 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
41304 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
41305 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
41306 #endif
41307 GLOBAL_EXTERN atomic_t smBufAllocCount;
41308 GLOBAL_EXTERN atomic_t midCount;
41309 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
41310 index 6b0e064..94e6c3c 100644
41311 --- a/fs/cifs/link.c
41312 +++ b/fs/cifs/link.c
41313 @@ -600,7 +600,7 @@ symlink_exit:
41314
41315 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41316 {
41317 - char *p = nd_get_link(nd);
41318 + const char *p = nd_get_link(nd);
41319 if (!IS_ERR(p))
41320 kfree(p);
41321 }
41322 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
41323 index 703ef5c..2a44ed5 100644
41324 --- a/fs/cifs/misc.c
41325 +++ b/fs/cifs/misc.c
41326 @@ -156,7 +156,7 @@ cifs_buf_get(void)
41327 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41328 atomic_inc(&bufAllocCount);
41329 #ifdef CONFIG_CIFS_STATS2
41330 - atomic_inc(&totBufAllocCount);
41331 + atomic_inc_unchecked(&totBufAllocCount);
41332 #endif /* CONFIG_CIFS_STATS2 */
41333 }
41334
41335 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41336 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41337 atomic_inc(&smBufAllocCount);
41338 #ifdef CONFIG_CIFS_STATS2
41339 - atomic_inc(&totSmBufAllocCount);
41340 + atomic_inc_unchecked(&totSmBufAllocCount);
41341 #endif /* CONFIG_CIFS_STATS2 */
41342
41343 }
41344 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
41345 index 6901578..d402eb5 100644
41346 --- a/fs/coda/cache.c
41347 +++ b/fs/coda/cache.c
41348 @@ -24,7 +24,7 @@
41349 #include "coda_linux.h"
41350 #include "coda_cache.h"
41351
41352 -static atomic_t permission_epoch = ATOMIC_INIT(0);
41353 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41354
41355 /* replace or extend an acl cache hit */
41356 void coda_cache_enter(struct inode *inode, int mask)
41357 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
41358 struct coda_inode_info *cii = ITOC(inode);
41359
41360 spin_lock(&cii->c_lock);
41361 - cii->c_cached_epoch = atomic_read(&permission_epoch);
41362 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41363 if (cii->c_uid != current_fsuid()) {
41364 cii->c_uid = current_fsuid();
41365 cii->c_cached_perm = mask;
41366 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
41367 {
41368 struct coda_inode_info *cii = ITOC(inode);
41369 spin_lock(&cii->c_lock);
41370 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41371 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41372 spin_unlock(&cii->c_lock);
41373 }
41374
41375 /* remove all acl caches */
41376 void coda_cache_clear_all(struct super_block *sb)
41377 {
41378 - atomic_inc(&permission_epoch);
41379 + atomic_inc_unchecked(&permission_epoch);
41380 }
41381
41382
41383 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
41384 spin_lock(&cii->c_lock);
41385 hit = (mask & cii->c_cached_perm) == mask &&
41386 cii->c_uid == current_fsuid() &&
41387 - cii->c_cached_epoch == atomic_read(&permission_epoch);
41388 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41389 spin_unlock(&cii->c_lock);
41390
41391 return hit;
41392 diff --git a/fs/compat.c b/fs/compat.c
41393 index c987875..08771ca 100644
41394 --- a/fs/compat.c
41395 +++ b/fs/compat.c
41396 @@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
41397 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41398 {
41399 compat_ino_t ino = stat->ino;
41400 - typeof(ubuf->st_uid) uid = 0;
41401 - typeof(ubuf->st_gid) gid = 0;
41402 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41403 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41404 int err;
41405
41406 SET_UID(uid, stat->uid);
41407 @@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
41408
41409 set_fs(KERNEL_DS);
41410 /* The __user pointer cast is valid because of the set_fs() */
41411 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41412 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41413 set_fs(oldfs);
41414 /* truncating is ok because it's a user address */
41415 if (!ret)
41416 @@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
41417 goto out;
41418
41419 ret = -EINVAL;
41420 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41421 + if (nr_segs > UIO_MAXIOV)
41422 goto out;
41423 if (nr_segs > fast_segs) {
41424 ret = -ENOMEM;
41425 @@ -845,6 +845,7 @@ struct compat_old_linux_dirent {
41426
41427 struct compat_readdir_callback {
41428 struct compat_old_linux_dirent __user *dirent;
41429 + struct file * file;
41430 int result;
41431 };
41432
41433 @@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
41434 buf->result = -EOVERFLOW;
41435 return -EOVERFLOW;
41436 }
41437 +
41438 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41439 + return 0;
41440 +
41441 buf->result++;
41442 dirent = buf->dirent;
41443 if (!access_ok(VERIFY_WRITE, dirent,
41444 @@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
41445
41446 buf.result = 0;
41447 buf.dirent = dirent;
41448 + buf.file = file;
41449
41450 error = vfs_readdir(file, compat_fillonedir, &buf);
41451 if (buf.result)
41452 @@ -914,6 +920,7 @@ struct compat_linux_dirent {
41453 struct compat_getdents_callback {
41454 struct compat_linux_dirent __user *current_dir;
41455 struct compat_linux_dirent __user *previous;
41456 + struct file * file;
41457 int count;
41458 int error;
41459 };
41460 @@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
41461 buf->error = -EOVERFLOW;
41462 return -EOVERFLOW;
41463 }
41464 +
41465 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41466 + return 0;
41467 +
41468 dirent = buf->previous;
41469 if (dirent) {
41470 if (__put_user(offset, &dirent->d_off))
41471 @@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
41472 buf.previous = NULL;
41473 buf.count = count;
41474 buf.error = 0;
41475 + buf.file = file;
41476
41477 error = vfs_readdir(file, compat_filldir, &buf);
41478 if (error >= 0)
41479 @@ -1003,6 +1015,7 @@ out:
41480 struct compat_getdents_callback64 {
41481 struct linux_dirent64 __user *current_dir;
41482 struct linux_dirent64 __user *previous;
41483 + struct file * file;
41484 int count;
41485 int error;
41486 };
41487 @@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
41488 buf->error = -EINVAL; /* only used if we fail.. */
41489 if (reclen > buf->count)
41490 return -EINVAL;
41491 +
41492 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41493 + return 0;
41494 +
41495 dirent = buf->previous;
41496
41497 if (dirent) {
41498 @@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
41499 buf.previous = NULL;
41500 buf.count = count;
41501 buf.error = 0;
41502 + buf.file = file;
41503
41504 error = vfs_readdir(file, compat_filldir64, &buf);
41505 if (error >= 0)
41506 error = buf.error;
41507 lastdirent = buf.previous;
41508 if (lastdirent) {
41509 - typeof(lastdirent->d_off) d_off = file->f_pos;
41510 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41511 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41512 error = -EFAULT;
41513 else
41514 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
41515 index 112e45a..b59845b 100644
41516 --- a/fs/compat_binfmt_elf.c
41517 +++ b/fs/compat_binfmt_elf.c
41518 @@ -30,11 +30,13 @@
41519 #undef elf_phdr
41520 #undef elf_shdr
41521 #undef elf_note
41522 +#undef elf_dyn
41523 #undef elf_addr_t
41524 #define elfhdr elf32_hdr
41525 #define elf_phdr elf32_phdr
41526 #define elf_shdr elf32_shdr
41527 #define elf_note elf32_note
41528 +#define elf_dyn Elf32_Dyn
41529 #define elf_addr_t Elf32_Addr
41530
41531 /*
41532 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
41533 index 51352de..93292ff 100644
41534 --- a/fs/compat_ioctl.c
41535 +++ b/fs/compat_ioctl.c
41536 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
41537
41538 err = get_user(palp, &up->palette);
41539 err |= get_user(length, &up->length);
41540 + if (err)
41541 + return -EFAULT;
41542
41543 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41544 err = put_user(compat_ptr(palp), &up_native->palette);
41545 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
41546 return -EFAULT;
41547 if (__get_user(udata, &ss32->iomem_base))
41548 return -EFAULT;
41549 - ss.iomem_base = compat_ptr(udata);
41550 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41551 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41552 __get_user(ss.port_high, &ss32->port_high))
41553 return -EFAULT;
41554 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
41555 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41556 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41557 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41558 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41559 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41560 return -EFAULT;
41561
41562 return ioctl_preallocate(file, p);
41563 @@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
41564 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41565 {
41566 unsigned int a, b;
41567 - a = *(unsigned int *)p;
41568 - b = *(unsigned int *)q;
41569 + a = *(const unsigned int *)p;
41570 + b = *(const unsigned int *)q;
41571 if (a > b)
41572 return 1;
41573 if (a < b)
41574 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
41575 index 9a37a9b..35792b6 100644
41576 --- a/fs/configfs/dir.c
41577 +++ b/fs/configfs/dir.c
41578 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41579 }
41580 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41581 struct configfs_dirent *next;
41582 - const char * name;
41583 + const unsigned char * name;
41584 + char d_name[sizeof(next->s_dentry->d_iname)];
41585 int len;
41586 struct inode *inode = NULL;
41587
41588 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41589 continue;
41590
41591 name = configfs_get_name(next);
41592 - len = strlen(name);
41593 + if (next->s_dentry && name == next->s_dentry->d_iname) {
41594 + len = next->s_dentry->d_name.len;
41595 + memcpy(d_name, name, len);
41596 + name = d_name;
41597 + } else
41598 + len = strlen(name);
41599
41600 /*
41601 * We'll have a dentry and an inode for
41602 diff --git a/fs/dcache.c b/fs/dcache.c
41603 index f7908ae..920a680 100644
41604 --- a/fs/dcache.c
41605 +++ b/fs/dcache.c
41606 @@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages)
41607 mempages -= reserve;
41608
41609 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41610 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41611 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41612
41613 dcache_init();
41614 inode_init();
41615 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
41616 index f3a257d..715ac0f 100644
41617 --- a/fs/debugfs/inode.c
41618 +++ b/fs/debugfs/inode.c
41619 @@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
41620 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
41621 {
41622 return debugfs_create_file(name,
41623 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41624 + S_IFDIR | S_IRWXU,
41625 +#else
41626 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41627 +#endif
41628 parent, NULL, NULL);
41629 }
41630 EXPORT_SYMBOL_GPL(debugfs_create_dir);
41631 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
41632 index af11098..81e3bbe 100644
41633 --- a/fs/ecryptfs/inode.c
41634 +++ b/fs/ecryptfs/inode.c
41635 @@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
41636 old_fs = get_fs();
41637 set_fs(get_ds());
41638 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41639 - (char __user *)lower_buf,
41640 + (char __force_user *)lower_buf,
41641 lower_bufsiz);
41642 set_fs(old_fs);
41643 if (rc < 0)
41644 @@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41645 }
41646 old_fs = get_fs();
41647 set_fs(get_ds());
41648 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41649 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41650 set_fs(old_fs);
41651 if (rc < 0) {
41652 kfree(buf);
41653 @@ -752,7 +752,7 @@ out:
41654 static void
41655 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41656 {
41657 - char *buf = nd_get_link(nd);
41658 + const char *buf = nd_get_link(nd);
41659 if (!IS_ERR(buf)) {
41660 /* Free the char* */
41661 kfree(buf);
41662 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
41663 index 0dc5a3d..d3cdeea 100644
41664 --- a/fs/ecryptfs/miscdev.c
41665 +++ b/fs/ecryptfs/miscdev.c
41666 @@ -328,7 +328,7 @@ check_list:
41667 goto out_unlock_msg_ctx;
41668 i = 5;
41669 if (msg_ctx->msg) {
41670 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
41671 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41672 goto out_unlock_msg_ctx;
41673 i += packet_length_size;
41674 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41675 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
41676 index 608c1c3..7d040a8 100644
41677 --- a/fs/ecryptfs/read_write.c
41678 +++ b/fs/ecryptfs/read_write.c
41679 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
41680 return -EIO;
41681 fs_save = get_fs();
41682 set_fs(get_ds());
41683 - rc = vfs_write(lower_file, data, size, &offset);
41684 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41685 set_fs(fs_save);
41686 mark_inode_dirty_sync(ecryptfs_inode);
41687 return rc;
41688 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
41689 return -EIO;
41690 fs_save = get_fs();
41691 set_fs(get_ds());
41692 - rc = vfs_read(lower_file, data, size, &offset);
41693 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41694 set_fs(fs_save);
41695 return rc;
41696 }
41697 diff --git a/fs/exec.c b/fs/exec.c
41698 index 3625464..04855f9 100644
41699 --- a/fs/exec.c
41700 +++ b/fs/exec.c
41701 @@ -55,12 +55,28 @@
41702 #include <linux/pipe_fs_i.h>
41703 #include <linux/oom.h>
41704 #include <linux/compat.h>
41705 +#include <linux/random.h>
41706 +#include <linux/seq_file.h>
41707 +
41708 +#ifdef CONFIG_PAX_REFCOUNT
41709 +#include <linux/kallsyms.h>
41710 +#include <linux/kdebug.h>
41711 +#endif
41712
41713 #include <asm/uaccess.h>
41714 #include <asm/mmu_context.h>
41715 #include <asm/tlb.h>
41716 #include "internal.h"
41717
41718 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
41719 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
41720 +#endif
41721 +
41722 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41723 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41724 +EXPORT_SYMBOL(pax_set_initial_flags_func);
41725 +#endif
41726 +
41727 int core_uses_pid;
41728 char core_pattern[CORENAME_MAX_SIZE] = "core";
41729 unsigned int core_pipe_limit;
41730 @@ -70,7 +86,7 @@ struct core_name {
41731 char *corename;
41732 int used, size;
41733 };
41734 -static atomic_t call_count = ATOMIC_INIT(1);
41735 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41736
41737 /* The maximal length of core_pattern is also specified in sysctl.c */
41738
41739 @@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41740 int write)
41741 {
41742 struct page *page;
41743 - int ret;
41744
41745 -#ifdef CONFIG_STACK_GROWSUP
41746 - if (write) {
41747 - ret = expand_downwards(bprm->vma, pos);
41748 - if (ret < 0)
41749 - return NULL;
41750 - }
41751 -#endif
41752 - ret = get_user_pages(current, bprm->mm, pos,
41753 - 1, write, 1, &page, NULL);
41754 - if (ret <= 0)
41755 + if (0 > expand_downwards(bprm->vma, pos))
41756 + return NULL;
41757 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41758 return NULL;
41759
41760 if (write) {
41761 @@ -215,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41762 if (size <= ARG_MAX)
41763 return page;
41764
41765 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41766 + // only allow 1MB for argv+env on suid/sgid binaries
41767 + // to prevent easy ASLR exhaustion
41768 + if (((bprm->cred->euid != current_euid()) ||
41769 + (bprm->cred->egid != current_egid())) &&
41770 + (size > (1024 * 1024))) {
41771 + put_page(page);
41772 + return NULL;
41773 + }
41774 +#endif
41775 +
41776 /*
41777 * Limit to 1/4-th the stack size for the argv+env strings.
41778 * This ensures that:
41779 @@ -274,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41780 vma->vm_end = STACK_TOP_MAX;
41781 vma->vm_start = vma->vm_end - PAGE_SIZE;
41782 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41783 +
41784 +#ifdef CONFIG_PAX_SEGMEXEC
41785 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41786 +#endif
41787 +
41788 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41789 INIT_LIST_HEAD(&vma->anon_vma_chain);
41790
41791 @@ -288,6 +312,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41792 mm->stack_vm = mm->total_vm = 1;
41793 up_write(&mm->mmap_sem);
41794 bprm->p = vma->vm_end - sizeof(void *);
41795 +
41796 +#ifdef CONFIG_PAX_RANDUSTACK
41797 + if (randomize_va_space)
41798 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41799 +#endif
41800 +
41801 return 0;
41802 err:
41803 up_write(&mm->mmap_sem);
41804 @@ -396,19 +426,7 @@ err:
41805 return err;
41806 }
41807
41808 -struct user_arg_ptr {
41809 -#ifdef CONFIG_COMPAT
41810 - bool is_compat;
41811 -#endif
41812 - union {
41813 - const char __user *const __user *native;
41814 -#ifdef CONFIG_COMPAT
41815 - compat_uptr_t __user *compat;
41816 -#endif
41817 - } ptr;
41818 -};
41819 -
41820 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41821 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41822 {
41823 const char __user *native;
41824
41825 @@ -417,14 +435,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41826 compat_uptr_t compat;
41827
41828 if (get_user(compat, argv.ptr.compat + nr))
41829 - return ERR_PTR(-EFAULT);
41830 + return (const char __force_user *)ERR_PTR(-EFAULT);
41831
41832 return compat_ptr(compat);
41833 }
41834 #endif
41835
41836 if (get_user(native, argv.ptr.native + nr))
41837 - return ERR_PTR(-EFAULT);
41838 + return (const char __force_user *)ERR_PTR(-EFAULT);
41839
41840 return native;
41841 }
41842 @@ -443,7 +461,7 @@ static int count(struct user_arg_ptr argv, int max)
41843 if (!p)
41844 break;
41845
41846 - if (IS_ERR(p))
41847 + if (IS_ERR((const char __force_kernel *)p))
41848 return -EFAULT;
41849
41850 if (i++ >= max)
41851 @@ -477,7 +495,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
41852
41853 ret = -EFAULT;
41854 str = get_user_arg_ptr(argv, argc);
41855 - if (IS_ERR(str))
41856 + if (IS_ERR((const char __force_kernel *)str))
41857 goto out;
41858
41859 len = strnlen_user(str, MAX_ARG_STRLEN);
41860 @@ -559,7 +577,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
41861 int r;
41862 mm_segment_t oldfs = get_fs();
41863 struct user_arg_ptr argv = {
41864 - .ptr.native = (const char __user *const __user *)__argv,
41865 + .ptr.native = (const char __force_user *const __force_user *)__argv,
41866 };
41867
41868 set_fs(KERNEL_DS);
41869 @@ -594,7 +612,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41870 unsigned long new_end = old_end - shift;
41871 struct mmu_gather tlb;
41872
41873 - BUG_ON(new_start > new_end);
41874 + if (new_start >= new_end || new_start < mmap_min_addr)
41875 + return -ENOMEM;
41876
41877 /*
41878 * ensure there are no vmas between where we want to go
41879 @@ -603,6 +622,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41880 if (vma != find_vma(mm, new_start))
41881 return -EFAULT;
41882
41883 +#ifdef CONFIG_PAX_SEGMEXEC
41884 + BUG_ON(pax_find_mirror_vma(vma));
41885 +#endif
41886 +
41887 /*
41888 * cover the whole range: [new_start, old_end)
41889 */
41890 @@ -683,10 +706,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41891 stack_top = arch_align_stack(stack_top);
41892 stack_top = PAGE_ALIGN(stack_top);
41893
41894 - if (unlikely(stack_top < mmap_min_addr) ||
41895 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41896 - return -ENOMEM;
41897 -
41898 stack_shift = vma->vm_end - stack_top;
41899
41900 bprm->p -= stack_shift;
41901 @@ -698,8 +717,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
41902 bprm->exec -= stack_shift;
41903
41904 down_write(&mm->mmap_sem);
41905 +
41906 + /* Move stack pages down in memory. */
41907 + if (stack_shift) {
41908 + ret = shift_arg_pages(vma, stack_shift);
41909 + if (ret)
41910 + goto out_unlock;
41911 + }
41912 +
41913 vm_flags = VM_STACK_FLAGS;
41914
41915 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41916 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41917 + vm_flags &= ~VM_EXEC;
41918 +
41919 +#ifdef CONFIG_PAX_MPROTECT
41920 + if (mm->pax_flags & MF_PAX_MPROTECT)
41921 + vm_flags &= ~VM_MAYEXEC;
41922 +#endif
41923 +
41924 + }
41925 +#endif
41926 +
41927 /*
41928 * Adjust stack execute permissions; explicitly enable for
41929 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41930 @@ -718,13 +757,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41931 goto out_unlock;
41932 BUG_ON(prev != vma);
41933
41934 - /* Move stack pages down in memory. */
41935 - if (stack_shift) {
41936 - ret = shift_arg_pages(vma, stack_shift);
41937 - if (ret)
41938 - goto out_unlock;
41939 - }
41940 -
41941 /* mprotect_fixup is overkill to remove the temporary stack flags */
41942 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41943
41944 @@ -805,7 +837,7 @@ int kernel_read(struct file *file, loff_t offset,
41945 old_fs = get_fs();
41946 set_fs(get_ds());
41947 /* The cast to a user pointer is valid due to the set_fs() */
41948 - result = vfs_read(file, (void __user *)addr, count, &pos);
41949 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
41950 set_fs(old_fs);
41951 return result;
41952 }
41953 @@ -1067,6 +1099,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
41954 perf_event_comm(tsk);
41955 }
41956
41957 +static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
41958 +{
41959 + int i, ch;
41960 +
41961 + /* Copies the binary name from after last slash */
41962 + for (i = 0; (ch = *(fn++)) != '\0';) {
41963 + if (ch == '/')
41964 + i = 0; /* overwrite what we wrote */
41965 + else
41966 + if (i < len - 1)
41967 + tcomm[i++] = ch;
41968 + }
41969 + tcomm[i] = '\0';
41970 +}
41971 +
41972 int flush_old_exec(struct linux_binprm * bprm)
41973 {
41974 int retval;
41975 @@ -1081,6 +1128,7 @@ int flush_old_exec(struct linux_binprm * bprm)
41976
41977 set_mm_exe_file(bprm->mm, bprm->file);
41978
41979 + filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
41980 /*
41981 * Release all of the old mmap stuff
41982 */
41983 @@ -1112,10 +1160,6 @@ EXPORT_SYMBOL(would_dump);
41984
41985 void setup_new_exec(struct linux_binprm * bprm)
41986 {
41987 - int i, ch;
41988 - const char *name;
41989 - char tcomm[sizeof(current->comm)];
41990 -
41991 arch_pick_mmap_layout(current->mm);
41992
41993 /* This is the point of no return */
41994 @@ -1126,18 +1170,7 @@ void setup_new_exec(struct linux_binprm * bprm)
41995 else
41996 set_dumpable(current->mm, suid_dumpable);
41997
41998 - name = bprm->filename;
41999 -
42000 - /* Copies the binary name from after last slash */
42001 - for (i=0; (ch = *(name++)) != '\0';) {
42002 - if (ch == '/')
42003 - i = 0; /* overwrite what we wrote */
42004 - else
42005 - if (i < (sizeof(tcomm) - 1))
42006 - tcomm[i++] = ch;
42007 - }
42008 - tcomm[i] = '\0';
42009 - set_task_comm(current, tcomm);
42010 + set_task_comm(current, bprm->tcomm);
42011
42012 /* Set the new mm task size. We have to do that late because it may
42013 * depend on TIF_32BIT which is only updated in flush_thread() on
42014 @@ -1247,7 +1280,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
42015 }
42016 rcu_read_unlock();
42017
42018 - if (p->fs->users > n_fs) {
42019 + if (atomic_read(&p->fs->users) > n_fs) {
42020 bprm->unsafe |= LSM_UNSAFE_SHARE;
42021 } else {
42022 res = -EAGAIN;
42023 @@ -1442,6 +1475,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
42024
42025 EXPORT_SYMBOL(search_binary_handler);
42026
42027 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42028 +static atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
42029 +#endif
42030 +
42031 /*
42032 * sys_execve() executes a new program.
42033 */
42034 @@ -1450,6 +1487,11 @@ static int do_execve_common(const char *filename,
42035 struct user_arg_ptr envp,
42036 struct pt_regs *regs)
42037 {
42038 +#ifdef CONFIG_GRKERNSEC
42039 + struct file *old_exec_file;
42040 + struct acl_subject_label *old_acl;
42041 + struct rlimit old_rlim[RLIM_NLIMITS];
42042 +#endif
42043 struct linux_binprm *bprm;
42044 struct file *file;
42045 struct files_struct *displaced;
42046 @@ -1457,6 +1499,8 @@ static int do_execve_common(const char *filename,
42047 int retval;
42048 const struct cred *cred = current_cred();
42049
42050 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
42051 +
42052 /*
42053 * We move the actual failure in case of RLIMIT_NPROC excess from
42054 * set*uid() to execve() because too many poorly written programs
42055 @@ -1497,12 +1541,27 @@ static int do_execve_common(const char *filename,
42056 if (IS_ERR(file))
42057 goto out_unmark;
42058
42059 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
42060 + retval = -EPERM;
42061 + goto out_file;
42062 + }
42063 +
42064 sched_exec();
42065
42066 bprm->file = file;
42067 bprm->filename = filename;
42068 bprm->interp = filename;
42069
42070 + if (gr_process_user_ban()) {
42071 + retval = -EPERM;
42072 + goto out_file;
42073 + }
42074 +
42075 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
42076 + retval = -EACCES;
42077 + goto out_file;
42078 + }
42079 +
42080 retval = bprm_mm_init(bprm);
42081 if (retval)
42082 goto out_file;
42083 @@ -1532,11 +1591,46 @@ static int do_execve_common(const char *filename,
42084 if (retval < 0)
42085 goto out;
42086
42087 + if (!gr_tpe_allow(file)) {
42088 + retval = -EACCES;
42089 + goto out;
42090 + }
42091 +
42092 + if (gr_check_crash_exec(file)) {
42093 + retval = -EACCES;
42094 + goto out;
42095 + }
42096 +
42097 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
42098 +
42099 + gr_handle_exec_args(bprm, argv);
42100 +
42101 +#ifdef CONFIG_GRKERNSEC
42102 + old_acl = current->acl;
42103 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
42104 + old_exec_file = current->exec_file;
42105 + get_file(file);
42106 + current->exec_file = file;
42107 +#endif
42108 +
42109 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
42110 + bprm->unsafe);
42111 + if (retval < 0)
42112 + goto out_fail;
42113 +
42114 retval = search_binary_handler(bprm,regs);
42115 if (retval < 0)
42116 - goto out;
42117 + goto out_fail;
42118 +#ifdef CONFIG_GRKERNSEC
42119 + if (old_exec_file)
42120 + fput(old_exec_file);
42121 +#endif
42122
42123 /* execve succeeded */
42124 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42125 + current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
42126 +#endif
42127 +
42128 current->fs->in_exec = 0;
42129 current->in_execve = 0;
42130 acct_update_integrals(current);
42131 @@ -1545,6 +1639,14 @@ static int do_execve_common(const char *filename,
42132 put_files_struct(displaced);
42133 return retval;
42134
42135 +out_fail:
42136 +#ifdef CONFIG_GRKERNSEC
42137 + current->acl = old_acl;
42138 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
42139 + fput(current->exec_file);
42140 + current->exec_file = old_exec_file;
42141 +#endif
42142 +
42143 out:
42144 if (bprm->mm) {
42145 acct_arg_size(bprm, 0);
42146 @@ -1618,7 +1720,7 @@ static int expand_corename(struct core_name *cn)
42147 {
42148 char *old_corename = cn->corename;
42149
42150 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
42151 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
42152 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
42153
42154 if (!cn->corename) {
42155 @@ -1715,7 +1817,7 @@ static int format_corename(struct core_name *cn, long signr)
42156 int pid_in_pattern = 0;
42157 int err = 0;
42158
42159 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
42160 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
42161 cn->corename = kmalloc(cn->size, GFP_KERNEL);
42162 cn->used = 0;
42163
42164 @@ -1812,6 +1914,218 @@ out:
42165 return ispipe;
42166 }
42167
42168 +int pax_check_flags(unsigned long *flags)
42169 +{
42170 + int retval = 0;
42171 +
42172 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
42173 + if (*flags & MF_PAX_SEGMEXEC)
42174 + {
42175 + *flags &= ~MF_PAX_SEGMEXEC;
42176 + retval = -EINVAL;
42177 + }
42178 +#endif
42179 +
42180 + if ((*flags & MF_PAX_PAGEEXEC)
42181 +
42182 +#ifdef CONFIG_PAX_PAGEEXEC
42183 + && (*flags & MF_PAX_SEGMEXEC)
42184 +#endif
42185 +
42186 + )
42187 + {
42188 + *flags &= ~MF_PAX_PAGEEXEC;
42189 + retval = -EINVAL;
42190 + }
42191 +
42192 + if ((*flags & MF_PAX_MPROTECT)
42193 +
42194 +#ifdef CONFIG_PAX_MPROTECT
42195 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42196 +#endif
42197 +
42198 + )
42199 + {
42200 + *flags &= ~MF_PAX_MPROTECT;
42201 + retval = -EINVAL;
42202 + }
42203 +
42204 + if ((*flags & MF_PAX_EMUTRAMP)
42205 +
42206 +#ifdef CONFIG_PAX_EMUTRAMP
42207 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42208 +#endif
42209 +
42210 + )
42211 + {
42212 + *flags &= ~MF_PAX_EMUTRAMP;
42213 + retval = -EINVAL;
42214 + }
42215 +
42216 + return retval;
42217 +}
42218 +
42219 +EXPORT_SYMBOL(pax_check_flags);
42220 +
42221 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42222 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
42223 +{
42224 + struct task_struct *tsk = current;
42225 + struct mm_struct *mm = current->mm;
42226 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
42227 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
42228 + char *path_exec = NULL;
42229 + char *path_fault = NULL;
42230 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
42231 +
42232 + if (buffer_exec && buffer_fault) {
42233 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
42234 +
42235 + down_read(&mm->mmap_sem);
42236 + vma = mm->mmap;
42237 + while (vma && (!vma_exec || !vma_fault)) {
42238 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
42239 + vma_exec = vma;
42240 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
42241 + vma_fault = vma;
42242 + vma = vma->vm_next;
42243 + }
42244 + if (vma_exec) {
42245 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
42246 + if (IS_ERR(path_exec))
42247 + path_exec = "<path too long>";
42248 + else {
42249 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
42250 + if (path_exec) {
42251 + *path_exec = 0;
42252 + path_exec = buffer_exec;
42253 + } else
42254 + path_exec = "<path too long>";
42255 + }
42256 + }
42257 + if (vma_fault) {
42258 + start = vma_fault->vm_start;
42259 + end = vma_fault->vm_end;
42260 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
42261 + if (vma_fault->vm_file) {
42262 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
42263 + if (IS_ERR(path_fault))
42264 + path_fault = "<path too long>";
42265 + else {
42266 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
42267 + if (path_fault) {
42268 + *path_fault = 0;
42269 + path_fault = buffer_fault;
42270 + } else
42271 + path_fault = "<path too long>";
42272 + }
42273 + } else
42274 + path_fault = "<anonymous mapping>";
42275 + }
42276 + up_read(&mm->mmap_sem);
42277 + }
42278 + if (tsk->signal->curr_ip)
42279 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
42280 + else
42281 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
42282 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
42283 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
42284 + task_uid(tsk), task_euid(tsk), pc, sp);
42285 + free_page((unsigned long)buffer_exec);
42286 + free_page((unsigned long)buffer_fault);
42287 + pax_report_insns(regs, pc, sp);
42288 + do_coredump(SIGKILL, SIGKILL, regs);
42289 +}
42290 +#endif
42291 +
42292 +#ifdef CONFIG_PAX_REFCOUNT
42293 +void pax_report_refcount_overflow(struct pt_regs *regs)
42294 +{
42295 + if (current->signal->curr_ip)
42296 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42297 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
42298 + else
42299 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42300 + current->comm, task_pid_nr(current), current_uid(), current_euid());
42301 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
42302 + show_regs(regs);
42303 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
42304 +}
42305 +#endif
42306 +
42307 +#ifdef CONFIG_PAX_USERCOPY
42308 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
42309 +int object_is_on_stack(const void *obj, unsigned long len)
42310 +{
42311 + const void * const stack = task_stack_page(current);
42312 + const void * const stackend = stack + THREAD_SIZE;
42313 +
42314 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42315 + const void *frame = NULL;
42316 + const void *oldframe;
42317 +#endif
42318 +
42319 + if (obj + len < obj)
42320 + return -1;
42321 +
42322 + if (obj + len <= stack || stackend <= obj)
42323 + return 0;
42324 +
42325 + if (obj < stack || stackend < obj + len)
42326 + return -1;
42327 +
42328 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42329 + oldframe = __builtin_frame_address(1);
42330 + if (oldframe)
42331 + frame = __builtin_frame_address(2);
42332 + /*
42333 + low ----------------------------------------------> high
42334 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
42335 + ^----------------^
42336 + allow copies only within here
42337 + */
42338 + while (stack <= frame && frame < stackend) {
42339 + /* if obj + len extends past the last frame, this
42340 + check won't pass and the next frame will be 0,
42341 + causing us to bail out and correctly report
42342 + the copy as invalid
42343 + */
42344 + if (obj + len <= frame)
42345 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
42346 + oldframe = frame;
42347 + frame = *(const void * const *)frame;
42348 + }
42349 + return -1;
42350 +#else
42351 + return 1;
42352 +#endif
42353 +}
42354 +
42355 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
42356 +{
42357 + if (current->signal->curr_ip)
42358 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42359 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42360 + else
42361 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42362 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42363 + dump_stack();
42364 + gr_handle_kernel_exploit();
42365 + do_group_exit(SIGKILL);
42366 +}
42367 +#endif
42368 +
42369 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
42370 +void pax_track_stack(void)
42371 +{
42372 + unsigned long sp = (unsigned long)&sp;
42373 + if (sp < current_thread_info()->lowest_stack &&
42374 + sp > (unsigned long)task_stack_page(current))
42375 + current_thread_info()->lowest_stack = sp;
42376 +}
42377 +EXPORT_SYMBOL(pax_track_stack);
42378 +#endif
42379 +
42380 static int zap_process(struct task_struct *start, int exit_code)
42381 {
42382 struct task_struct *t;
42383 @@ -2023,17 +2337,17 @@ static void wait_for_dump_helpers(struct file *file)
42384 pipe = file->f_path.dentry->d_inode->i_pipe;
42385
42386 pipe_lock(pipe);
42387 - pipe->readers++;
42388 - pipe->writers--;
42389 + atomic_inc(&pipe->readers);
42390 + atomic_dec(&pipe->writers);
42391
42392 - while ((pipe->readers > 1) && (!signal_pending(current))) {
42393 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
42394 wake_up_interruptible_sync(&pipe->wait);
42395 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42396 pipe_wait(pipe);
42397 }
42398
42399 - pipe->readers--;
42400 - pipe->writers++;
42401 + atomic_dec(&pipe->readers);
42402 + atomic_inc(&pipe->writers);
42403 pipe_unlock(pipe);
42404
42405 }
42406 @@ -2094,7 +2408,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42407 int retval = 0;
42408 int flag = 0;
42409 int ispipe;
42410 - static atomic_t core_dump_count = ATOMIC_INIT(0);
42411 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
42412 struct coredump_params cprm = {
42413 .signr = signr,
42414 .regs = regs,
42415 @@ -2109,6 +2423,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42416
42417 audit_core_dumps(signr);
42418
42419 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
42420 + gr_handle_brute_attach(current, cprm.mm_flags);
42421 +
42422 binfmt = mm->binfmt;
42423 if (!binfmt || !binfmt->core_dump)
42424 goto fail;
42425 @@ -2176,7 +2493,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42426 }
42427 cprm.limit = RLIM_INFINITY;
42428
42429 - dump_count = atomic_inc_return(&core_dump_count);
42430 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
42431 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
42432 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
42433 task_tgid_vnr(current), current->comm);
42434 @@ -2203,6 +2520,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42435 } else {
42436 struct inode *inode;
42437
42438 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42439 +
42440 if (cprm.limit < binfmt->min_coredump)
42441 goto fail_unlock;
42442
42443 @@ -2246,7 +2565,7 @@ close_fail:
42444 filp_close(cprm.file, NULL);
42445 fail_dropcount:
42446 if (ispipe)
42447 - atomic_dec(&core_dump_count);
42448 + atomic_dec_unchecked(&core_dump_count);
42449 fail_unlock:
42450 kfree(cn.corename);
42451 fail_corename:
42452 @@ -2265,7 +2584,7 @@ fail:
42453 */
42454 int dump_write(struct file *file, const void *addr, int nr)
42455 {
42456 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42457 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42458 }
42459 EXPORT_SYMBOL(dump_write);
42460
42461 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
42462 index a8cbe1b..fed04cb 100644
42463 --- a/fs/ext2/balloc.c
42464 +++ b/fs/ext2/balloc.c
42465 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
42466
42467 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42468 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42469 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42470 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42471 sbi->s_resuid != current_fsuid() &&
42472 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42473 return 0;
42474 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
42475 index a203892..4e64db5 100644
42476 --- a/fs/ext3/balloc.c
42477 +++ b/fs/ext3/balloc.c
42478 @@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
42479
42480 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42481 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42482 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42483 + if (free_blocks < root_blocks + 1 &&
42484 !use_reservation && sbi->s_resuid != current_fsuid() &&
42485 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42486 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
42487 + !capable_nolog(CAP_SYS_RESOURCE)) {
42488 return 0;
42489 }
42490 return 1;
42491 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
42492 index 12ccacd..a6035fce0 100644
42493 --- a/fs/ext4/balloc.c
42494 +++ b/fs/ext4/balloc.c
42495 @@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
42496 /* Hm, nope. Are (enough) root reserved clusters available? */
42497 if (sbi->s_resuid == current_fsuid() ||
42498 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42499 - capable(CAP_SYS_RESOURCE) ||
42500 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42501 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42502 + capable_nolog(CAP_SYS_RESOURCE)) {
42503
42504 if (free_clusters >= (nclusters + dirty_clusters))
42505 return 1;
42506 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
42507 index 5b0e26a..0aa002d 100644
42508 --- a/fs/ext4/ext4.h
42509 +++ b/fs/ext4/ext4.h
42510 @@ -1208,19 +1208,19 @@ struct ext4_sb_info {
42511 unsigned long s_mb_last_start;
42512
42513 /* stats for buddy allocator */
42514 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42515 - atomic_t s_bal_success; /* we found long enough chunks */
42516 - atomic_t s_bal_allocated; /* in blocks */
42517 - atomic_t s_bal_ex_scanned; /* total extents scanned */
42518 - atomic_t s_bal_goals; /* goal hits */
42519 - atomic_t s_bal_breaks; /* too long searches */
42520 - atomic_t s_bal_2orders; /* 2^order hits */
42521 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42522 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42523 + atomic_unchecked_t s_bal_allocated; /* in blocks */
42524 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42525 + atomic_unchecked_t s_bal_goals; /* goal hits */
42526 + atomic_unchecked_t s_bal_breaks; /* too long searches */
42527 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42528 spinlock_t s_bal_lock;
42529 unsigned long s_mb_buddies_generated;
42530 unsigned long long s_mb_generation_time;
42531 - atomic_t s_mb_lost_chunks;
42532 - atomic_t s_mb_preallocated;
42533 - atomic_t s_mb_discarded;
42534 + atomic_unchecked_t s_mb_lost_chunks;
42535 + atomic_unchecked_t s_mb_preallocated;
42536 + atomic_unchecked_t s_mb_discarded;
42537 atomic_t s_lock_busy;
42538
42539 /* locality groups */
42540 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
42541 index e2d8be8..c7f0ce9 100644
42542 --- a/fs/ext4/mballoc.c
42543 +++ b/fs/ext4/mballoc.c
42544 @@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
42545 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42546
42547 if (EXT4_SB(sb)->s_mb_stats)
42548 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42549 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42550
42551 break;
42552 }
42553 @@ -2088,7 +2088,7 @@ repeat:
42554 ac->ac_status = AC_STATUS_CONTINUE;
42555 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42556 cr = 3;
42557 - atomic_inc(&sbi->s_mb_lost_chunks);
42558 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42559 goto repeat;
42560 }
42561 }
42562 @@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
42563 if (sbi->s_mb_stats) {
42564 ext4_msg(sb, KERN_INFO,
42565 "mballoc: %u blocks %u reqs (%u success)",
42566 - atomic_read(&sbi->s_bal_allocated),
42567 - atomic_read(&sbi->s_bal_reqs),
42568 - atomic_read(&sbi->s_bal_success));
42569 + atomic_read_unchecked(&sbi->s_bal_allocated),
42570 + atomic_read_unchecked(&sbi->s_bal_reqs),
42571 + atomic_read_unchecked(&sbi->s_bal_success));
42572 ext4_msg(sb, KERN_INFO,
42573 "mballoc: %u extents scanned, %u goal hits, "
42574 "%u 2^N hits, %u breaks, %u lost",
42575 - atomic_read(&sbi->s_bal_ex_scanned),
42576 - atomic_read(&sbi->s_bal_goals),
42577 - atomic_read(&sbi->s_bal_2orders),
42578 - atomic_read(&sbi->s_bal_breaks),
42579 - atomic_read(&sbi->s_mb_lost_chunks));
42580 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42581 + atomic_read_unchecked(&sbi->s_bal_goals),
42582 + atomic_read_unchecked(&sbi->s_bal_2orders),
42583 + atomic_read_unchecked(&sbi->s_bal_breaks),
42584 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42585 ext4_msg(sb, KERN_INFO,
42586 "mballoc: %lu generated and it took %Lu",
42587 sbi->s_mb_buddies_generated,
42588 sbi->s_mb_generation_time);
42589 ext4_msg(sb, KERN_INFO,
42590 "mballoc: %u preallocated, %u discarded",
42591 - atomic_read(&sbi->s_mb_preallocated),
42592 - atomic_read(&sbi->s_mb_discarded));
42593 + atomic_read_unchecked(&sbi->s_mb_preallocated),
42594 + atomic_read_unchecked(&sbi->s_mb_discarded));
42595 }
42596
42597 free_percpu(sbi->s_locality_groups);
42598 @@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
42599 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42600
42601 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42602 - atomic_inc(&sbi->s_bal_reqs);
42603 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42604 + atomic_inc_unchecked(&sbi->s_bal_reqs);
42605 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42606 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42607 - atomic_inc(&sbi->s_bal_success);
42608 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42609 + atomic_inc_unchecked(&sbi->s_bal_success);
42610 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42611 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42612 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42613 - atomic_inc(&sbi->s_bal_goals);
42614 + atomic_inc_unchecked(&sbi->s_bal_goals);
42615 if (ac->ac_found > sbi->s_mb_max_to_scan)
42616 - atomic_inc(&sbi->s_bal_breaks);
42617 + atomic_inc_unchecked(&sbi->s_bal_breaks);
42618 }
42619
42620 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42621 @@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
42622 trace_ext4_mb_new_inode_pa(ac, pa);
42623
42624 ext4_mb_use_inode_pa(ac, pa);
42625 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
42626 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
42627
42628 ei = EXT4_I(ac->ac_inode);
42629 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42630 @@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
42631 trace_ext4_mb_new_group_pa(ac, pa);
42632
42633 ext4_mb_use_group_pa(ac, pa);
42634 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42635 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42636
42637 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42638 lg = ac->ac_lg;
42639 @@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
42640 * from the bitmap and continue.
42641 */
42642 }
42643 - atomic_add(free, &sbi->s_mb_discarded);
42644 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
42645
42646 return err;
42647 }
42648 @@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
42649 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42650 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42651 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42652 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42653 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42654 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42655
42656 return 0;
42657 diff --git a/fs/fcntl.c b/fs/fcntl.c
42658 index 22764c7..86372c9 100644
42659 --- a/fs/fcntl.c
42660 +++ b/fs/fcntl.c
42661 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
42662 if (err)
42663 return err;
42664
42665 + if (gr_handle_chroot_fowner(pid, type))
42666 + return -ENOENT;
42667 + if (gr_check_protected_task_fowner(pid, type))
42668 + return -EACCES;
42669 +
42670 f_modown(filp, pid, type, force);
42671 return 0;
42672 }
42673 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42674
42675 static int f_setown_ex(struct file *filp, unsigned long arg)
42676 {
42677 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42678 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42679 struct f_owner_ex owner;
42680 struct pid *pid;
42681 int type;
42682 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
42683
42684 static int f_getown_ex(struct file *filp, unsigned long arg)
42685 {
42686 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42687 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42688 struct f_owner_ex owner;
42689 int ret = 0;
42690
42691 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
42692 switch (cmd) {
42693 case F_DUPFD:
42694 case F_DUPFD_CLOEXEC:
42695 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42696 if (arg >= rlimit(RLIMIT_NOFILE))
42697 break;
42698 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42699 diff --git a/fs/fifo.c b/fs/fifo.c
42700 index b1a524d..4ee270e 100644
42701 --- a/fs/fifo.c
42702 +++ b/fs/fifo.c
42703 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
42704 */
42705 filp->f_op = &read_pipefifo_fops;
42706 pipe->r_counter++;
42707 - if (pipe->readers++ == 0)
42708 + if (atomic_inc_return(&pipe->readers) == 1)
42709 wake_up_partner(inode);
42710
42711 - if (!pipe->writers) {
42712 + if (!atomic_read(&pipe->writers)) {
42713 if ((filp->f_flags & O_NONBLOCK)) {
42714 /* suppress POLLHUP until we have
42715 * seen a writer */
42716 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
42717 * errno=ENXIO when there is no process reading the FIFO.
42718 */
42719 ret = -ENXIO;
42720 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42721 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42722 goto err;
42723
42724 filp->f_op = &write_pipefifo_fops;
42725 pipe->w_counter++;
42726 - if (!pipe->writers++)
42727 + if (atomic_inc_return(&pipe->writers) == 1)
42728 wake_up_partner(inode);
42729
42730 - if (!pipe->readers) {
42731 + if (!atomic_read(&pipe->readers)) {
42732 wait_for_partner(inode, &pipe->r_counter);
42733 if (signal_pending(current))
42734 goto err_wr;
42735 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
42736 */
42737 filp->f_op = &rdwr_pipefifo_fops;
42738
42739 - pipe->readers++;
42740 - pipe->writers++;
42741 + atomic_inc(&pipe->readers);
42742 + atomic_inc(&pipe->writers);
42743 pipe->r_counter++;
42744 pipe->w_counter++;
42745 - if (pipe->readers == 1 || pipe->writers == 1)
42746 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42747 wake_up_partner(inode);
42748 break;
42749
42750 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
42751 return 0;
42752
42753 err_rd:
42754 - if (!--pipe->readers)
42755 + if (atomic_dec_and_test(&pipe->readers))
42756 wake_up_interruptible(&pipe->wait);
42757 ret = -ERESTARTSYS;
42758 goto err;
42759
42760 err_wr:
42761 - if (!--pipe->writers)
42762 + if (atomic_dec_and_test(&pipe->writers))
42763 wake_up_interruptible(&pipe->wait);
42764 ret = -ERESTARTSYS;
42765 goto err;
42766
42767 err:
42768 - if (!pipe->readers && !pipe->writers)
42769 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42770 free_pipe_info(inode);
42771
42772 err_nocleanup:
42773 diff --git a/fs/file.c b/fs/file.c
42774 index 4c6992d..104cdea 100644
42775 --- a/fs/file.c
42776 +++ b/fs/file.c
42777 @@ -15,6 +15,7 @@
42778 #include <linux/slab.h>
42779 #include <linux/vmalloc.h>
42780 #include <linux/file.h>
42781 +#include <linux/security.h>
42782 #include <linux/fdtable.h>
42783 #include <linux/bitops.h>
42784 #include <linux/interrupt.h>
42785 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
42786 * N.B. For clone tasks sharing a files structure, this test
42787 * will limit the total number of files that can be opened.
42788 */
42789 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42790 if (nr >= rlimit(RLIMIT_NOFILE))
42791 return -EMFILE;
42792
42793 diff --git a/fs/filesystems.c b/fs/filesystems.c
42794 index 0845f84..7b4ebef 100644
42795 --- a/fs/filesystems.c
42796 +++ b/fs/filesystems.c
42797 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
42798 int len = dot ? dot - name : strlen(name);
42799
42800 fs = __get_fs_type(name, len);
42801 +
42802 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
42803 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42804 +#else
42805 if (!fs && (request_module("%.*s", len, name) == 0))
42806 +#endif
42807 fs = __get_fs_type(name, len);
42808
42809 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42810 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
42811 index 78b519c..a8b4979 100644
42812 --- a/fs/fs_struct.c
42813 +++ b/fs/fs_struct.c
42814 @@ -4,6 +4,7 @@
42815 #include <linux/path.h>
42816 #include <linux/slab.h>
42817 #include <linux/fs_struct.h>
42818 +#include <linux/grsecurity.h>
42819 #include "internal.h"
42820
42821 static inline void path_get_longterm(struct path *path)
42822 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
42823 old_root = fs->root;
42824 fs->root = *path;
42825 path_get_longterm(path);
42826 + gr_set_chroot_entries(current, path);
42827 write_seqcount_end(&fs->seq);
42828 spin_unlock(&fs->lock);
42829 if (old_root.dentry)
42830 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
42831 && fs->root.mnt == old_root->mnt) {
42832 path_get_longterm(new_root);
42833 fs->root = *new_root;
42834 + gr_set_chroot_entries(p, new_root);
42835 count++;
42836 }
42837 if (fs->pwd.dentry == old_root->dentry
42838 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
42839 spin_lock(&fs->lock);
42840 write_seqcount_begin(&fs->seq);
42841 tsk->fs = NULL;
42842 - kill = !--fs->users;
42843 + gr_clear_chroot_entries(tsk);
42844 + kill = !atomic_dec_return(&fs->users);
42845 write_seqcount_end(&fs->seq);
42846 spin_unlock(&fs->lock);
42847 task_unlock(tsk);
42848 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42849 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42850 /* We don't need to lock fs - think why ;-) */
42851 if (fs) {
42852 - fs->users = 1;
42853 + atomic_set(&fs->users, 1);
42854 fs->in_exec = 0;
42855 spin_lock_init(&fs->lock);
42856 seqcount_init(&fs->seq);
42857 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42858 spin_lock(&old->lock);
42859 fs->root = old->root;
42860 path_get_longterm(&fs->root);
42861 + /* instead of calling gr_set_chroot_entries here,
42862 + we call it from every caller of this function
42863 + */
42864 fs->pwd = old->pwd;
42865 path_get_longterm(&fs->pwd);
42866 spin_unlock(&old->lock);
42867 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
42868
42869 task_lock(current);
42870 spin_lock(&fs->lock);
42871 - kill = !--fs->users;
42872 + kill = !atomic_dec_return(&fs->users);
42873 current->fs = new_fs;
42874 + gr_set_chroot_entries(current, &new_fs->root);
42875 spin_unlock(&fs->lock);
42876 task_unlock(current);
42877
42878 @@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
42879
42880 int current_umask(void)
42881 {
42882 - return current->fs->umask;
42883 + return current->fs->umask | gr_acl_umask();
42884 }
42885 EXPORT_SYMBOL(current_umask);
42886
42887 /* to be mentioned only in INIT_TASK */
42888 struct fs_struct init_fs = {
42889 - .users = 1,
42890 + .users = ATOMIC_INIT(1),
42891 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
42892 .seq = SEQCNT_ZERO,
42893 .umask = 0022,
42894 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
42895 task_lock(current);
42896
42897 spin_lock(&init_fs.lock);
42898 - init_fs.users++;
42899 + atomic_inc(&init_fs.users);
42900 spin_unlock(&init_fs.lock);
42901
42902 spin_lock(&fs->lock);
42903 current->fs = &init_fs;
42904 - kill = !--fs->users;
42905 + gr_set_chroot_entries(current, &current->fs->root);
42906 + kill = !atomic_dec_return(&fs->users);
42907 spin_unlock(&fs->lock);
42908
42909 task_unlock(current);
42910 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
42911 index 9905350..02eaec4 100644
42912 --- a/fs/fscache/cookie.c
42913 +++ b/fs/fscache/cookie.c
42914 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
42915 parent ? (char *) parent->def->name : "<no-parent>",
42916 def->name, netfs_data);
42917
42918 - fscache_stat(&fscache_n_acquires);
42919 + fscache_stat_unchecked(&fscache_n_acquires);
42920
42921 /* if there's no parent cookie, then we don't create one here either */
42922 if (!parent) {
42923 - fscache_stat(&fscache_n_acquires_null);
42924 + fscache_stat_unchecked(&fscache_n_acquires_null);
42925 _leave(" [no parent]");
42926 return NULL;
42927 }
42928 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
42929 /* allocate and initialise a cookie */
42930 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42931 if (!cookie) {
42932 - fscache_stat(&fscache_n_acquires_oom);
42933 + fscache_stat_unchecked(&fscache_n_acquires_oom);
42934 _leave(" [ENOMEM]");
42935 return NULL;
42936 }
42937 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42938
42939 switch (cookie->def->type) {
42940 case FSCACHE_COOKIE_TYPE_INDEX:
42941 - fscache_stat(&fscache_n_cookie_index);
42942 + fscache_stat_unchecked(&fscache_n_cookie_index);
42943 break;
42944 case FSCACHE_COOKIE_TYPE_DATAFILE:
42945 - fscache_stat(&fscache_n_cookie_data);
42946 + fscache_stat_unchecked(&fscache_n_cookie_data);
42947 break;
42948 default:
42949 - fscache_stat(&fscache_n_cookie_special);
42950 + fscache_stat_unchecked(&fscache_n_cookie_special);
42951 break;
42952 }
42953
42954 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42955 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42956 atomic_dec(&parent->n_children);
42957 __fscache_cookie_put(cookie);
42958 - fscache_stat(&fscache_n_acquires_nobufs);
42959 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42960 _leave(" = NULL");
42961 return NULL;
42962 }
42963 }
42964
42965 - fscache_stat(&fscache_n_acquires_ok);
42966 + fscache_stat_unchecked(&fscache_n_acquires_ok);
42967 _leave(" = %p", cookie);
42968 return cookie;
42969 }
42970 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
42971 cache = fscache_select_cache_for_object(cookie->parent);
42972 if (!cache) {
42973 up_read(&fscache_addremove_sem);
42974 - fscache_stat(&fscache_n_acquires_no_cache);
42975 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42976 _leave(" = -ENOMEDIUM [no cache]");
42977 return -ENOMEDIUM;
42978 }
42979 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
42980 object = cache->ops->alloc_object(cache, cookie);
42981 fscache_stat_d(&fscache_n_cop_alloc_object);
42982 if (IS_ERR(object)) {
42983 - fscache_stat(&fscache_n_object_no_alloc);
42984 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
42985 ret = PTR_ERR(object);
42986 goto error;
42987 }
42988
42989 - fscache_stat(&fscache_n_object_alloc);
42990 + fscache_stat_unchecked(&fscache_n_object_alloc);
42991
42992 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42993
42994 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
42995 struct fscache_object *object;
42996 struct hlist_node *_p;
42997
42998 - fscache_stat(&fscache_n_updates);
42999 + fscache_stat_unchecked(&fscache_n_updates);
43000
43001 if (!cookie) {
43002 - fscache_stat(&fscache_n_updates_null);
43003 + fscache_stat_unchecked(&fscache_n_updates_null);
43004 _leave(" [no cookie]");
43005 return;
43006 }
43007 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
43008 struct fscache_object *object;
43009 unsigned long event;
43010
43011 - fscache_stat(&fscache_n_relinquishes);
43012 + fscache_stat_unchecked(&fscache_n_relinquishes);
43013 if (retire)
43014 - fscache_stat(&fscache_n_relinquishes_retire);
43015 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
43016
43017 if (!cookie) {
43018 - fscache_stat(&fscache_n_relinquishes_null);
43019 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
43020 _leave(" [no cookie]");
43021 return;
43022 }
43023 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
43024
43025 /* wait for the cookie to finish being instantiated (or to fail) */
43026 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
43027 - fscache_stat(&fscache_n_relinquishes_waitcrt);
43028 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
43029 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
43030 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
43031 }
43032 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
43033 index f6aad48..88dcf26 100644
43034 --- a/fs/fscache/internal.h
43035 +++ b/fs/fscache/internal.h
43036 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
43037 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
43038 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
43039
43040 -extern atomic_t fscache_n_op_pend;
43041 -extern atomic_t fscache_n_op_run;
43042 -extern atomic_t fscache_n_op_enqueue;
43043 -extern atomic_t fscache_n_op_deferred_release;
43044 -extern atomic_t fscache_n_op_release;
43045 -extern atomic_t fscache_n_op_gc;
43046 -extern atomic_t fscache_n_op_cancelled;
43047 -extern atomic_t fscache_n_op_rejected;
43048 +extern atomic_unchecked_t fscache_n_op_pend;
43049 +extern atomic_unchecked_t fscache_n_op_run;
43050 +extern atomic_unchecked_t fscache_n_op_enqueue;
43051 +extern atomic_unchecked_t fscache_n_op_deferred_release;
43052 +extern atomic_unchecked_t fscache_n_op_release;
43053 +extern atomic_unchecked_t fscache_n_op_gc;
43054 +extern atomic_unchecked_t fscache_n_op_cancelled;
43055 +extern atomic_unchecked_t fscache_n_op_rejected;
43056
43057 -extern atomic_t fscache_n_attr_changed;
43058 -extern atomic_t fscache_n_attr_changed_ok;
43059 -extern atomic_t fscache_n_attr_changed_nobufs;
43060 -extern atomic_t fscache_n_attr_changed_nomem;
43061 -extern atomic_t fscache_n_attr_changed_calls;
43062 +extern atomic_unchecked_t fscache_n_attr_changed;
43063 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
43064 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
43065 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
43066 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
43067
43068 -extern atomic_t fscache_n_allocs;
43069 -extern atomic_t fscache_n_allocs_ok;
43070 -extern atomic_t fscache_n_allocs_wait;
43071 -extern atomic_t fscache_n_allocs_nobufs;
43072 -extern atomic_t fscache_n_allocs_intr;
43073 -extern atomic_t fscache_n_allocs_object_dead;
43074 -extern atomic_t fscache_n_alloc_ops;
43075 -extern atomic_t fscache_n_alloc_op_waits;
43076 +extern atomic_unchecked_t fscache_n_allocs;
43077 +extern atomic_unchecked_t fscache_n_allocs_ok;
43078 +extern atomic_unchecked_t fscache_n_allocs_wait;
43079 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
43080 +extern atomic_unchecked_t fscache_n_allocs_intr;
43081 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
43082 +extern atomic_unchecked_t fscache_n_alloc_ops;
43083 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
43084
43085 -extern atomic_t fscache_n_retrievals;
43086 -extern atomic_t fscache_n_retrievals_ok;
43087 -extern atomic_t fscache_n_retrievals_wait;
43088 -extern atomic_t fscache_n_retrievals_nodata;
43089 -extern atomic_t fscache_n_retrievals_nobufs;
43090 -extern atomic_t fscache_n_retrievals_intr;
43091 -extern atomic_t fscache_n_retrievals_nomem;
43092 -extern atomic_t fscache_n_retrievals_object_dead;
43093 -extern atomic_t fscache_n_retrieval_ops;
43094 -extern atomic_t fscache_n_retrieval_op_waits;
43095 +extern atomic_unchecked_t fscache_n_retrievals;
43096 +extern atomic_unchecked_t fscache_n_retrievals_ok;
43097 +extern atomic_unchecked_t fscache_n_retrievals_wait;
43098 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
43099 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
43100 +extern atomic_unchecked_t fscache_n_retrievals_intr;
43101 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
43102 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
43103 +extern atomic_unchecked_t fscache_n_retrieval_ops;
43104 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
43105
43106 -extern atomic_t fscache_n_stores;
43107 -extern atomic_t fscache_n_stores_ok;
43108 -extern atomic_t fscache_n_stores_again;
43109 -extern atomic_t fscache_n_stores_nobufs;
43110 -extern atomic_t fscache_n_stores_oom;
43111 -extern atomic_t fscache_n_store_ops;
43112 -extern atomic_t fscache_n_store_calls;
43113 -extern atomic_t fscache_n_store_pages;
43114 -extern atomic_t fscache_n_store_radix_deletes;
43115 -extern atomic_t fscache_n_store_pages_over_limit;
43116 +extern atomic_unchecked_t fscache_n_stores;
43117 +extern atomic_unchecked_t fscache_n_stores_ok;
43118 +extern atomic_unchecked_t fscache_n_stores_again;
43119 +extern atomic_unchecked_t fscache_n_stores_nobufs;
43120 +extern atomic_unchecked_t fscache_n_stores_oom;
43121 +extern atomic_unchecked_t fscache_n_store_ops;
43122 +extern atomic_unchecked_t fscache_n_store_calls;
43123 +extern atomic_unchecked_t fscache_n_store_pages;
43124 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
43125 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
43126
43127 -extern atomic_t fscache_n_store_vmscan_not_storing;
43128 -extern atomic_t fscache_n_store_vmscan_gone;
43129 -extern atomic_t fscache_n_store_vmscan_busy;
43130 -extern atomic_t fscache_n_store_vmscan_cancelled;
43131 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43132 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
43133 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
43134 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43135
43136 -extern atomic_t fscache_n_marks;
43137 -extern atomic_t fscache_n_uncaches;
43138 +extern atomic_unchecked_t fscache_n_marks;
43139 +extern atomic_unchecked_t fscache_n_uncaches;
43140
43141 -extern atomic_t fscache_n_acquires;
43142 -extern atomic_t fscache_n_acquires_null;
43143 -extern atomic_t fscache_n_acquires_no_cache;
43144 -extern atomic_t fscache_n_acquires_ok;
43145 -extern atomic_t fscache_n_acquires_nobufs;
43146 -extern atomic_t fscache_n_acquires_oom;
43147 +extern atomic_unchecked_t fscache_n_acquires;
43148 +extern atomic_unchecked_t fscache_n_acquires_null;
43149 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
43150 +extern atomic_unchecked_t fscache_n_acquires_ok;
43151 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
43152 +extern atomic_unchecked_t fscache_n_acquires_oom;
43153
43154 -extern atomic_t fscache_n_updates;
43155 -extern atomic_t fscache_n_updates_null;
43156 -extern atomic_t fscache_n_updates_run;
43157 +extern atomic_unchecked_t fscache_n_updates;
43158 +extern atomic_unchecked_t fscache_n_updates_null;
43159 +extern atomic_unchecked_t fscache_n_updates_run;
43160
43161 -extern atomic_t fscache_n_relinquishes;
43162 -extern atomic_t fscache_n_relinquishes_null;
43163 -extern atomic_t fscache_n_relinquishes_waitcrt;
43164 -extern atomic_t fscache_n_relinquishes_retire;
43165 +extern atomic_unchecked_t fscache_n_relinquishes;
43166 +extern atomic_unchecked_t fscache_n_relinquishes_null;
43167 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43168 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
43169
43170 -extern atomic_t fscache_n_cookie_index;
43171 -extern atomic_t fscache_n_cookie_data;
43172 -extern atomic_t fscache_n_cookie_special;
43173 +extern atomic_unchecked_t fscache_n_cookie_index;
43174 +extern atomic_unchecked_t fscache_n_cookie_data;
43175 +extern atomic_unchecked_t fscache_n_cookie_special;
43176
43177 -extern atomic_t fscache_n_object_alloc;
43178 -extern atomic_t fscache_n_object_no_alloc;
43179 -extern atomic_t fscache_n_object_lookups;
43180 -extern atomic_t fscache_n_object_lookups_negative;
43181 -extern atomic_t fscache_n_object_lookups_positive;
43182 -extern atomic_t fscache_n_object_lookups_timed_out;
43183 -extern atomic_t fscache_n_object_created;
43184 -extern atomic_t fscache_n_object_avail;
43185 -extern atomic_t fscache_n_object_dead;
43186 +extern atomic_unchecked_t fscache_n_object_alloc;
43187 +extern atomic_unchecked_t fscache_n_object_no_alloc;
43188 +extern atomic_unchecked_t fscache_n_object_lookups;
43189 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
43190 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
43191 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
43192 +extern atomic_unchecked_t fscache_n_object_created;
43193 +extern atomic_unchecked_t fscache_n_object_avail;
43194 +extern atomic_unchecked_t fscache_n_object_dead;
43195
43196 -extern atomic_t fscache_n_checkaux_none;
43197 -extern atomic_t fscache_n_checkaux_okay;
43198 -extern atomic_t fscache_n_checkaux_update;
43199 -extern atomic_t fscache_n_checkaux_obsolete;
43200 +extern atomic_unchecked_t fscache_n_checkaux_none;
43201 +extern atomic_unchecked_t fscache_n_checkaux_okay;
43202 +extern atomic_unchecked_t fscache_n_checkaux_update;
43203 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
43204
43205 extern atomic_t fscache_n_cop_alloc_object;
43206 extern atomic_t fscache_n_cop_lookup_object;
43207 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
43208 atomic_inc(stat);
43209 }
43210
43211 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
43212 +{
43213 + atomic_inc_unchecked(stat);
43214 +}
43215 +
43216 static inline void fscache_stat_d(atomic_t *stat)
43217 {
43218 atomic_dec(stat);
43219 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
43220
43221 #define __fscache_stat(stat) (NULL)
43222 #define fscache_stat(stat) do {} while (0)
43223 +#define fscache_stat_unchecked(stat) do {} while (0)
43224 #define fscache_stat_d(stat) do {} while (0)
43225 #endif
43226
43227 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
43228 index b6b897c..0ffff9c 100644
43229 --- a/fs/fscache/object.c
43230 +++ b/fs/fscache/object.c
43231 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43232 /* update the object metadata on disk */
43233 case FSCACHE_OBJECT_UPDATING:
43234 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
43235 - fscache_stat(&fscache_n_updates_run);
43236 + fscache_stat_unchecked(&fscache_n_updates_run);
43237 fscache_stat(&fscache_n_cop_update_object);
43238 object->cache->ops->update_object(object);
43239 fscache_stat_d(&fscache_n_cop_update_object);
43240 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43241 spin_lock(&object->lock);
43242 object->state = FSCACHE_OBJECT_DEAD;
43243 spin_unlock(&object->lock);
43244 - fscache_stat(&fscache_n_object_dead);
43245 + fscache_stat_unchecked(&fscache_n_object_dead);
43246 goto terminal_transit;
43247
43248 /* handle the parent cache of this object being withdrawn from
43249 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43250 spin_lock(&object->lock);
43251 object->state = FSCACHE_OBJECT_DEAD;
43252 spin_unlock(&object->lock);
43253 - fscache_stat(&fscache_n_object_dead);
43254 + fscache_stat_unchecked(&fscache_n_object_dead);
43255 goto terminal_transit;
43256
43257 /* complain about the object being woken up once it is
43258 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43259 parent->cookie->def->name, cookie->def->name,
43260 object->cache->tag->name);
43261
43262 - fscache_stat(&fscache_n_object_lookups);
43263 + fscache_stat_unchecked(&fscache_n_object_lookups);
43264 fscache_stat(&fscache_n_cop_lookup_object);
43265 ret = object->cache->ops->lookup_object(object);
43266 fscache_stat_d(&fscache_n_cop_lookup_object);
43267 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43268 if (ret == -ETIMEDOUT) {
43269 /* probably stuck behind another object, so move this one to
43270 * the back of the queue */
43271 - fscache_stat(&fscache_n_object_lookups_timed_out);
43272 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
43273 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43274 }
43275
43276 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
43277
43278 spin_lock(&object->lock);
43279 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43280 - fscache_stat(&fscache_n_object_lookups_negative);
43281 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
43282
43283 /* transit here to allow write requests to begin stacking up
43284 * and read requests to begin returning ENODATA */
43285 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
43286 * result, in which case there may be data available */
43287 spin_lock(&object->lock);
43288 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43289 - fscache_stat(&fscache_n_object_lookups_positive);
43290 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
43291
43292 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
43293
43294 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
43295 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43296 } else {
43297 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
43298 - fscache_stat(&fscache_n_object_created);
43299 + fscache_stat_unchecked(&fscache_n_object_created);
43300
43301 object->state = FSCACHE_OBJECT_AVAILABLE;
43302 spin_unlock(&object->lock);
43303 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
43304 fscache_enqueue_dependents(object);
43305
43306 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
43307 - fscache_stat(&fscache_n_object_avail);
43308 + fscache_stat_unchecked(&fscache_n_object_avail);
43309
43310 _leave("");
43311 }
43312 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43313 enum fscache_checkaux result;
43314
43315 if (!object->cookie->def->check_aux) {
43316 - fscache_stat(&fscache_n_checkaux_none);
43317 + fscache_stat_unchecked(&fscache_n_checkaux_none);
43318 return FSCACHE_CHECKAUX_OKAY;
43319 }
43320
43321 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43322 switch (result) {
43323 /* entry okay as is */
43324 case FSCACHE_CHECKAUX_OKAY:
43325 - fscache_stat(&fscache_n_checkaux_okay);
43326 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
43327 break;
43328
43329 /* entry requires update */
43330 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
43331 - fscache_stat(&fscache_n_checkaux_update);
43332 + fscache_stat_unchecked(&fscache_n_checkaux_update);
43333 break;
43334
43335 /* entry requires deletion */
43336 case FSCACHE_CHECKAUX_OBSOLETE:
43337 - fscache_stat(&fscache_n_checkaux_obsolete);
43338 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
43339 break;
43340
43341 default:
43342 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
43343 index 30afdfa..2256596 100644
43344 --- a/fs/fscache/operation.c
43345 +++ b/fs/fscache/operation.c
43346 @@ -17,7 +17,7 @@
43347 #include <linux/slab.h>
43348 #include "internal.h"
43349
43350 -atomic_t fscache_op_debug_id;
43351 +atomic_unchecked_t fscache_op_debug_id;
43352 EXPORT_SYMBOL(fscache_op_debug_id);
43353
43354 /**
43355 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
43356 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
43357 ASSERTCMP(atomic_read(&op->usage), >, 0);
43358
43359 - fscache_stat(&fscache_n_op_enqueue);
43360 + fscache_stat_unchecked(&fscache_n_op_enqueue);
43361 switch (op->flags & FSCACHE_OP_TYPE) {
43362 case FSCACHE_OP_ASYNC:
43363 _debug("queue async");
43364 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
43365 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
43366 if (op->processor)
43367 fscache_enqueue_operation(op);
43368 - fscache_stat(&fscache_n_op_run);
43369 + fscache_stat_unchecked(&fscache_n_op_run);
43370 }
43371
43372 /*
43373 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43374 if (object->n_ops > 1) {
43375 atomic_inc(&op->usage);
43376 list_add_tail(&op->pend_link, &object->pending_ops);
43377 - fscache_stat(&fscache_n_op_pend);
43378 + fscache_stat_unchecked(&fscache_n_op_pend);
43379 } else if (!list_empty(&object->pending_ops)) {
43380 atomic_inc(&op->usage);
43381 list_add_tail(&op->pend_link, &object->pending_ops);
43382 - fscache_stat(&fscache_n_op_pend);
43383 + fscache_stat_unchecked(&fscache_n_op_pend);
43384 fscache_start_operations(object);
43385 } else {
43386 ASSERTCMP(object->n_in_progress, ==, 0);
43387 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43388 object->n_exclusive++; /* reads and writes must wait */
43389 atomic_inc(&op->usage);
43390 list_add_tail(&op->pend_link, &object->pending_ops);
43391 - fscache_stat(&fscache_n_op_pend);
43392 + fscache_stat_unchecked(&fscache_n_op_pend);
43393 ret = 0;
43394 } else {
43395 /* not allowed to submit ops in any other state */
43396 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
43397 if (object->n_exclusive > 0) {
43398 atomic_inc(&op->usage);
43399 list_add_tail(&op->pend_link, &object->pending_ops);
43400 - fscache_stat(&fscache_n_op_pend);
43401 + fscache_stat_unchecked(&fscache_n_op_pend);
43402 } else if (!list_empty(&object->pending_ops)) {
43403 atomic_inc(&op->usage);
43404 list_add_tail(&op->pend_link, &object->pending_ops);
43405 - fscache_stat(&fscache_n_op_pend);
43406 + fscache_stat_unchecked(&fscache_n_op_pend);
43407 fscache_start_operations(object);
43408 } else {
43409 ASSERTCMP(object->n_exclusive, ==, 0);
43410 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
43411 object->n_ops++;
43412 atomic_inc(&op->usage);
43413 list_add_tail(&op->pend_link, &object->pending_ops);
43414 - fscache_stat(&fscache_n_op_pend);
43415 + fscache_stat_unchecked(&fscache_n_op_pend);
43416 ret = 0;
43417 } else if (object->state == FSCACHE_OBJECT_DYING ||
43418 object->state == FSCACHE_OBJECT_LC_DYING ||
43419 object->state == FSCACHE_OBJECT_WITHDRAWING) {
43420 - fscache_stat(&fscache_n_op_rejected);
43421 + fscache_stat_unchecked(&fscache_n_op_rejected);
43422 ret = -ENOBUFS;
43423 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
43424 fscache_report_unexpected_submission(object, op, ostate);
43425 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
43426
43427 ret = -EBUSY;
43428 if (!list_empty(&op->pend_link)) {
43429 - fscache_stat(&fscache_n_op_cancelled);
43430 + fscache_stat_unchecked(&fscache_n_op_cancelled);
43431 list_del_init(&op->pend_link);
43432 object->n_ops--;
43433 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
43434 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
43435 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
43436 BUG();
43437
43438 - fscache_stat(&fscache_n_op_release);
43439 + fscache_stat_unchecked(&fscache_n_op_release);
43440
43441 if (op->release) {
43442 op->release(op);
43443 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
43444 * lock, and defer it otherwise */
43445 if (!spin_trylock(&object->lock)) {
43446 _debug("defer put");
43447 - fscache_stat(&fscache_n_op_deferred_release);
43448 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
43449
43450 cache = object->cache;
43451 spin_lock(&cache->op_gc_list_lock);
43452 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
43453
43454 _debug("GC DEFERRED REL OBJ%x OP%x",
43455 object->debug_id, op->debug_id);
43456 - fscache_stat(&fscache_n_op_gc);
43457 + fscache_stat_unchecked(&fscache_n_op_gc);
43458
43459 ASSERTCMP(atomic_read(&op->usage), ==, 0);
43460
43461 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
43462 index 3f7a59b..cf196cc 100644
43463 --- a/fs/fscache/page.c
43464 +++ b/fs/fscache/page.c
43465 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43466 val = radix_tree_lookup(&cookie->stores, page->index);
43467 if (!val) {
43468 rcu_read_unlock();
43469 - fscache_stat(&fscache_n_store_vmscan_not_storing);
43470 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
43471 __fscache_uncache_page(cookie, page);
43472 return true;
43473 }
43474 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43475 spin_unlock(&cookie->stores_lock);
43476
43477 if (xpage) {
43478 - fscache_stat(&fscache_n_store_vmscan_cancelled);
43479 - fscache_stat(&fscache_n_store_radix_deletes);
43480 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
43481 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43482 ASSERTCMP(xpage, ==, page);
43483 } else {
43484 - fscache_stat(&fscache_n_store_vmscan_gone);
43485 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
43486 }
43487
43488 wake_up_bit(&cookie->flags, 0);
43489 @@ -107,7 +107,7 @@ page_busy:
43490 /* we might want to wait here, but that could deadlock the allocator as
43491 * the work threads writing to the cache may all end up sleeping
43492 * on memory allocation */
43493 - fscache_stat(&fscache_n_store_vmscan_busy);
43494 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43495 return false;
43496 }
43497 EXPORT_SYMBOL(__fscache_maybe_release_page);
43498 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
43499 FSCACHE_COOKIE_STORING_TAG);
43500 if (!radix_tree_tag_get(&cookie->stores, page->index,
43501 FSCACHE_COOKIE_PENDING_TAG)) {
43502 - fscache_stat(&fscache_n_store_radix_deletes);
43503 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43504 xpage = radix_tree_delete(&cookie->stores, page->index);
43505 }
43506 spin_unlock(&cookie->stores_lock);
43507 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
43508
43509 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43510
43511 - fscache_stat(&fscache_n_attr_changed_calls);
43512 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43513
43514 if (fscache_object_is_active(object)) {
43515 fscache_stat(&fscache_n_cop_attr_changed);
43516 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43517
43518 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43519
43520 - fscache_stat(&fscache_n_attr_changed);
43521 + fscache_stat_unchecked(&fscache_n_attr_changed);
43522
43523 op = kzalloc(sizeof(*op), GFP_KERNEL);
43524 if (!op) {
43525 - fscache_stat(&fscache_n_attr_changed_nomem);
43526 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43527 _leave(" = -ENOMEM");
43528 return -ENOMEM;
43529 }
43530 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43531 if (fscache_submit_exclusive_op(object, op) < 0)
43532 goto nobufs;
43533 spin_unlock(&cookie->lock);
43534 - fscache_stat(&fscache_n_attr_changed_ok);
43535 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43536 fscache_put_operation(op);
43537 _leave(" = 0");
43538 return 0;
43539 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43540 nobufs:
43541 spin_unlock(&cookie->lock);
43542 kfree(op);
43543 - fscache_stat(&fscache_n_attr_changed_nobufs);
43544 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43545 _leave(" = %d", -ENOBUFS);
43546 return -ENOBUFS;
43547 }
43548 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
43549 /* allocate a retrieval operation and attempt to submit it */
43550 op = kzalloc(sizeof(*op), GFP_NOIO);
43551 if (!op) {
43552 - fscache_stat(&fscache_n_retrievals_nomem);
43553 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43554 return NULL;
43555 }
43556
43557 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43558 return 0;
43559 }
43560
43561 - fscache_stat(&fscache_n_retrievals_wait);
43562 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
43563
43564 jif = jiffies;
43565 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43566 fscache_wait_bit_interruptible,
43567 TASK_INTERRUPTIBLE) != 0) {
43568 - fscache_stat(&fscache_n_retrievals_intr);
43569 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43570 _leave(" = -ERESTARTSYS");
43571 return -ERESTARTSYS;
43572 }
43573 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43574 */
43575 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43576 struct fscache_retrieval *op,
43577 - atomic_t *stat_op_waits,
43578 - atomic_t *stat_object_dead)
43579 + atomic_unchecked_t *stat_op_waits,
43580 + atomic_unchecked_t *stat_object_dead)
43581 {
43582 int ret;
43583
43584 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43585 goto check_if_dead;
43586
43587 _debug(">>> WT");
43588 - fscache_stat(stat_op_waits);
43589 + fscache_stat_unchecked(stat_op_waits);
43590 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43591 fscache_wait_bit_interruptible,
43592 TASK_INTERRUPTIBLE) < 0) {
43593 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43594
43595 check_if_dead:
43596 if (unlikely(fscache_object_is_dead(object))) {
43597 - fscache_stat(stat_object_dead);
43598 + fscache_stat_unchecked(stat_object_dead);
43599 return -ENOBUFS;
43600 }
43601 return 0;
43602 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43603
43604 _enter("%p,%p,,,", cookie, page);
43605
43606 - fscache_stat(&fscache_n_retrievals);
43607 + fscache_stat_unchecked(&fscache_n_retrievals);
43608
43609 if (hlist_empty(&cookie->backing_objects))
43610 goto nobufs;
43611 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43612 goto nobufs_unlock;
43613 spin_unlock(&cookie->lock);
43614
43615 - fscache_stat(&fscache_n_retrieval_ops);
43616 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43617
43618 /* pin the netfs read context in case we need to do the actual netfs
43619 * read because we've encountered a cache read failure */
43620 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43621
43622 error:
43623 if (ret == -ENOMEM)
43624 - fscache_stat(&fscache_n_retrievals_nomem);
43625 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43626 else if (ret == -ERESTARTSYS)
43627 - fscache_stat(&fscache_n_retrievals_intr);
43628 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43629 else if (ret == -ENODATA)
43630 - fscache_stat(&fscache_n_retrievals_nodata);
43631 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43632 else if (ret < 0)
43633 - fscache_stat(&fscache_n_retrievals_nobufs);
43634 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43635 else
43636 - fscache_stat(&fscache_n_retrievals_ok);
43637 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43638
43639 fscache_put_retrieval(op);
43640 _leave(" = %d", ret);
43641 @@ -429,7 +429,7 @@ nobufs_unlock:
43642 spin_unlock(&cookie->lock);
43643 kfree(op);
43644 nobufs:
43645 - fscache_stat(&fscache_n_retrievals_nobufs);
43646 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43647 _leave(" = -ENOBUFS");
43648 return -ENOBUFS;
43649 }
43650 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43651
43652 _enter("%p,,%d,,,", cookie, *nr_pages);
43653
43654 - fscache_stat(&fscache_n_retrievals);
43655 + fscache_stat_unchecked(&fscache_n_retrievals);
43656
43657 if (hlist_empty(&cookie->backing_objects))
43658 goto nobufs;
43659 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43660 goto nobufs_unlock;
43661 spin_unlock(&cookie->lock);
43662
43663 - fscache_stat(&fscache_n_retrieval_ops);
43664 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43665
43666 /* pin the netfs read context in case we need to do the actual netfs
43667 * read because we've encountered a cache read failure */
43668 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43669
43670 error:
43671 if (ret == -ENOMEM)
43672 - fscache_stat(&fscache_n_retrievals_nomem);
43673 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43674 else if (ret == -ERESTARTSYS)
43675 - fscache_stat(&fscache_n_retrievals_intr);
43676 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43677 else if (ret == -ENODATA)
43678 - fscache_stat(&fscache_n_retrievals_nodata);
43679 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43680 else if (ret < 0)
43681 - fscache_stat(&fscache_n_retrievals_nobufs);
43682 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43683 else
43684 - fscache_stat(&fscache_n_retrievals_ok);
43685 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43686
43687 fscache_put_retrieval(op);
43688 _leave(" = %d", ret);
43689 @@ -545,7 +545,7 @@ nobufs_unlock:
43690 spin_unlock(&cookie->lock);
43691 kfree(op);
43692 nobufs:
43693 - fscache_stat(&fscache_n_retrievals_nobufs);
43694 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43695 _leave(" = -ENOBUFS");
43696 return -ENOBUFS;
43697 }
43698 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43699
43700 _enter("%p,%p,,,", cookie, page);
43701
43702 - fscache_stat(&fscache_n_allocs);
43703 + fscache_stat_unchecked(&fscache_n_allocs);
43704
43705 if (hlist_empty(&cookie->backing_objects))
43706 goto nobufs;
43707 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43708 goto nobufs_unlock;
43709 spin_unlock(&cookie->lock);
43710
43711 - fscache_stat(&fscache_n_alloc_ops);
43712 + fscache_stat_unchecked(&fscache_n_alloc_ops);
43713
43714 ret = fscache_wait_for_retrieval_activation(
43715 object, op,
43716 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43717
43718 error:
43719 if (ret == -ERESTARTSYS)
43720 - fscache_stat(&fscache_n_allocs_intr);
43721 + fscache_stat_unchecked(&fscache_n_allocs_intr);
43722 else if (ret < 0)
43723 - fscache_stat(&fscache_n_allocs_nobufs);
43724 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43725 else
43726 - fscache_stat(&fscache_n_allocs_ok);
43727 + fscache_stat_unchecked(&fscache_n_allocs_ok);
43728
43729 fscache_put_retrieval(op);
43730 _leave(" = %d", ret);
43731 @@ -625,7 +625,7 @@ nobufs_unlock:
43732 spin_unlock(&cookie->lock);
43733 kfree(op);
43734 nobufs:
43735 - fscache_stat(&fscache_n_allocs_nobufs);
43736 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43737 _leave(" = -ENOBUFS");
43738 return -ENOBUFS;
43739 }
43740 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43741
43742 spin_lock(&cookie->stores_lock);
43743
43744 - fscache_stat(&fscache_n_store_calls);
43745 + fscache_stat_unchecked(&fscache_n_store_calls);
43746
43747 /* find a page to store */
43748 page = NULL;
43749 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43750 page = results[0];
43751 _debug("gang %d [%lx]", n, page->index);
43752 if (page->index > op->store_limit) {
43753 - fscache_stat(&fscache_n_store_pages_over_limit);
43754 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43755 goto superseded;
43756 }
43757
43758 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43759 spin_unlock(&cookie->stores_lock);
43760 spin_unlock(&object->lock);
43761
43762 - fscache_stat(&fscache_n_store_pages);
43763 + fscache_stat_unchecked(&fscache_n_store_pages);
43764 fscache_stat(&fscache_n_cop_write_page);
43765 ret = object->cache->ops->write_page(op, page);
43766 fscache_stat_d(&fscache_n_cop_write_page);
43767 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43768 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43769 ASSERT(PageFsCache(page));
43770
43771 - fscache_stat(&fscache_n_stores);
43772 + fscache_stat_unchecked(&fscache_n_stores);
43773
43774 op = kzalloc(sizeof(*op), GFP_NOIO);
43775 if (!op)
43776 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43777 spin_unlock(&cookie->stores_lock);
43778 spin_unlock(&object->lock);
43779
43780 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43781 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43782 op->store_limit = object->store_limit;
43783
43784 if (fscache_submit_op(object, &op->op) < 0)
43785 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43786
43787 spin_unlock(&cookie->lock);
43788 radix_tree_preload_end();
43789 - fscache_stat(&fscache_n_store_ops);
43790 - fscache_stat(&fscache_n_stores_ok);
43791 + fscache_stat_unchecked(&fscache_n_store_ops);
43792 + fscache_stat_unchecked(&fscache_n_stores_ok);
43793
43794 /* the work queue now carries its own ref on the object */
43795 fscache_put_operation(&op->op);
43796 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43797 return 0;
43798
43799 already_queued:
43800 - fscache_stat(&fscache_n_stores_again);
43801 + fscache_stat_unchecked(&fscache_n_stores_again);
43802 already_pending:
43803 spin_unlock(&cookie->stores_lock);
43804 spin_unlock(&object->lock);
43805 spin_unlock(&cookie->lock);
43806 radix_tree_preload_end();
43807 kfree(op);
43808 - fscache_stat(&fscache_n_stores_ok);
43809 + fscache_stat_unchecked(&fscache_n_stores_ok);
43810 _leave(" = 0");
43811 return 0;
43812
43813 @@ -851,14 +851,14 @@ nobufs:
43814 spin_unlock(&cookie->lock);
43815 radix_tree_preload_end();
43816 kfree(op);
43817 - fscache_stat(&fscache_n_stores_nobufs);
43818 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
43819 _leave(" = -ENOBUFS");
43820 return -ENOBUFS;
43821
43822 nomem_free:
43823 kfree(op);
43824 nomem:
43825 - fscache_stat(&fscache_n_stores_oom);
43826 + fscache_stat_unchecked(&fscache_n_stores_oom);
43827 _leave(" = -ENOMEM");
43828 return -ENOMEM;
43829 }
43830 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
43831 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43832 ASSERTCMP(page, !=, NULL);
43833
43834 - fscache_stat(&fscache_n_uncaches);
43835 + fscache_stat_unchecked(&fscache_n_uncaches);
43836
43837 /* cache withdrawal may beat us to it */
43838 if (!PageFsCache(page))
43839 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
43840 unsigned long loop;
43841
43842 #ifdef CONFIG_FSCACHE_STATS
43843 - atomic_add(pagevec->nr, &fscache_n_marks);
43844 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43845 #endif
43846
43847 for (loop = 0; loop < pagevec->nr; loop++) {
43848 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
43849 index 4765190..2a067f2 100644
43850 --- a/fs/fscache/stats.c
43851 +++ b/fs/fscache/stats.c
43852 @@ -18,95 +18,95 @@
43853 /*
43854 * operation counters
43855 */
43856 -atomic_t fscache_n_op_pend;
43857 -atomic_t fscache_n_op_run;
43858 -atomic_t fscache_n_op_enqueue;
43859 -atomic_t fscache_n_op_requeue;
43860 -atomic_t fscache_n_op_deferred_release;
43861 -atomic_t fscache_n_op_release;
43862 -atomic_t fscache_n_op_gc;
43863 -atomic_t fscache_n_op_cancelled;
43864 -atomic_t fscache_n_op_rejected;
43865 +atomic_unchecked_t fscache_n_op_pend;
43866 +atomic_unchecked_t fscache_n_op_run;
43867 +atomic_unchecked_t fscache_n_op_enqueue;
43868 +atomic_unchecked_t fscache_n_op_requeue;
43869 +atomic_unchecked_t fscache_n_op_deferred_release;
43870 +atomic_unchecked_t fscache_n_op_release;
43871 +atomic_unchecked_t fscache_n_op_gc;
43872 +atomic_unchecked_t fscache_n_op_cancelled;
43873 +atomic_unchecked_t fscache_n_op_rejected;
43874
43875 -atomic_t fscache_n_attr_changed;
43876 -atomic_t fscache_n_attr_changed_ok;
43877 -atomic_t fscache_n_attr_changed_nobufs;
43878 -atomic_t fscache_n_attr_changed_nomem;
43879 -atomic_t fscache_n_attr_changed_calls;
43880 +atomic_unchecked_t fscache_n_attr_changed;
43881 +atomic_unchecked_t fscache_n_attr_changed_ok;
43882 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
43883 +atomic_unchecked_t fscache_n_attr_changed_nomem;
43884 +atomic_unchecked_t fscache_n_attr_changed_calls;
43885
43886 -atomic_t fscache_n_allocs;
43887 -atomic_t fscache_n_allocs_ok;
43888 -atomic_t fscache_n_allocs_wait;
43889 -atomic_t fscache_n_allocs_nobufs;
43890 -atomic_t fscache_n_allocs_intr;
43891 -atomic_t fscache_n_allocs_object_dead;
43892 -atomic_t fscache_n_alloc_ops;
43893 -atomic_t fscache_n_alloc_op_waits;
43894 +atomic_unchecked_t fscache_n_allocs;
43895 +atomic_unchecked_t fscache_n_allocs_ok;
43896 +atomic_unchecked_t fscache_n_allocs_wait;
43897 +atomic_unchecked_t fscache_n_allocs_nobufs;
43898 +atomic_unchecked_t fscache_n_allocs_intr;
43899 +atomic_unchecked_t fscache_n_allocs_object_dead;
43900 +atomic_unchecked_t fscache_n_alloc_ops;
43901 +atomic_unchecked_t fscache_n_alloc_op_waits;
43902
43903 -atomic_t fscache_n_retrievals;
43904 -atomic_t fscache_n_retrievals_ok;
43905 -atomic_t fscache_n_retrievals_wait;
43906 -atomic_t fscache_n_retrievals_nodata;
43907 -atomic_t fscache_n_retrievals_nobufs;
43908 -atomic_t fscache_n_retrievals_intr;
43909 -atomic_t fscache_n_retrievals_nomem;
43910 -atomic_t fscache_n_retrievals_object_dead;
43911 -atomic_t fscache_n_retrieval_ops;
43912 -atomic_t fscache_n_retrieval_op_waits;
43913 +atomic_unchecked_t fscache_n_retrievals;
43914 +atomic_unchecked_t fscache_n_retrievals_ok;
43915 +atomic_unchecked_t fscache_n_retrievals_wait;
43916 +atomic_unchecked_t fscache_n_retrievals_nodata;
43917 +atomic_unchecked_t fscache_n_retrievals_nobufs;
43918 +atomic_unchecked_t fscache_n_retrievals_intr;
43919 +atomic_unchecked_t fscache_n_retrievals_nomem;
43920 +atomic_unchecked_t fscache_n_retrievals_object_dead;
43921 +atomic_unchecked_t fscache_n_retrieval_ops;
43922 +atomic_unchecked_t fscache_n_retrieval_op_waits;
43923
43924 -atomic_t fscache_n_stores;
43925 -atomic_t fscache_n_stores_ok;
43926 -atomic_t fscache_n_stores_again;
43927 -atomic_t fscache_n_stores_nobufs;
43928 -atomic_t fscache_n_stores_oom;
43929 -atomic_t fscache_n_store_ops;
43930 -atomic_t fscache_n_store_calls;
43931 -atomic_t fscache_n_store_pages;
43932 -atomic_t fscache_n_store_radix_deletes;
43933 -atomic_t fscache_n_store_pages_over_limit;
43934 +atomic_unchecked_t fscache_n_stores;
43935 +atomic_unchecked_t fscache_n_stores_ok;
43936 +atomic_unchecked_t fscache_n_stores_again;
43937 +atomic_unchecked_t fscache_n_stores_nobufs;
43938 +atomic_unchecked_t fscache_n_stores_oom;
43939 +atomic_unchecked_t fscache_n_store_ops;
43940 +atomic_unchecked_t fscache_n_store_calls;
43941 +atomic_unchecked_t fscache_n_store_pages;
43942 +atomic_unchecked_t fscache_n_store_radix_deletes;
43943 +atomic_unchecked_t fscache_n_store_pages_over_limit;
43944
43945 -atomic_t fscache_n_store_vmscan_not_storing;
43946 -atomic_t fscache_n_store_vmscan_gone;
43947 -atomic_t fscache_n_store_vmscan_busy;
43948 -atomic_t fscache_n_store_vmscan_cancelled;
43949 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43950 +atomic_unchecked_t fscache_n_store_vmscan_gone;
43951 +atomic_unchecked_t fscache_n_store_vmscan_busy;
43952 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43953
43954 -atomic_t fscache_n_marks;
43955 -atomic_t fscache_n_uncaches;
43956 +atomic_unchecked_t fscache_n_marks;
43957 +atomic_unchecked_t fscache_n_uncaches;
43958
43959 -atomic_t fscache_n_acquires;
43960 -atomic_t fscache_n_acquires_null;
43961 -atomic_t fscache_n_acquires_no_cache;
43962 -atomic_t fscache_n_acquires_ok;
43963 -atomic_t fscache_n_acquires_nobufs;
43964 -atomic_t fscache_n_acquires_oom;
43965 +atomic_unchecked_t fscache_n_acquires;
43966 +atomic_unchecked_t fscache_n_acquires_null;
43967 +atomic_unchecked_t fscache_n_acquires_no_cache;
43968 +atomic_unchecked_t fscache_n_acquires_ok;
43969 +atomic_unchecked_t fscache_n_acquires_nobufs;
43970 +atomic_unchecked_t fscache_n_acquires_oom;
43971
43972 -atomic_t fscache_n_updates;
43973 -atomic_t fscache_n_updates_null;
43974 -atomic_t fscache_n_updates_run;
43975 +atomic_unchecked_t fscache_n_updates;
43976 +atomic_unchecked_t fscache_n_updates_null;
43977 +atomic_unchecked_t fscache_n_updates_run;
43978
43979 -atomic_t fscache_n_relinquishes;
43980 -atomic_t fscache_n_relinquishes_null;
43981 -atomic_t fscache_n_relinquishes_waitcrt;
43982 -atomic_t fscache_n_relinquishes_retire;
43983 +atomic_unchecked_t fscache_n_relinquishes;
43984 +atomic_unchecked_t fscache_n_relinquishes_null;
43985 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43986 +atomic_unchecked_t fscache_n_relinquishes_retire;
43987
43988 -atomic_t fscache_n_cookie_index;
43989 -atomic_t fscache_n_cookie_data;
43990 -atomic_t fscache_n_cookie_special;
43991 +atomic_unchecked_t fscache_n_cookie_index;
43992 +atomic_unchecked_t fscache_n_cookie_data;
43993 +atomic_unchecked_t fscache_n_cookie_special;
43994
43995 -atomic_t fscache_n_object_alloc;
43996 -atomic_t fscache_n_object_no_alloc;
43997 -atomic_t fscache_n_object_lookups;
43998 -atomic_t fscache_n_object_lookups_negative;
43999 -atomic_t fscache_n_object_lookups_positive;
44000 -atomic_t fscache_n_object_lookups_timed_out;
44001 -atomic_t fscache_n_object_created;
44002 -atomic_t fscache_n_object_avail;
44003 -atomic_t fscache_n_object_dead;
44004 +atomic_unchecked_t fscache_n_object_alloc;
44005 +atomic_unchecked_t fscache_n_object_no_alloc;
44006 +atomic_unchecked_t fscache_n_object_lookups;
44007 +atomic_unchecked_t fscache_n_object_lookups_negative;
44008 +atomic_unchecked_t fscache_n_object_lookups_positive;
44009 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
44010 +atomic_unchecked_t fscache_n_object_created;
44011 +atomic_unchecked_t fscache_n_object_avail;
44012 +atomic_unchecked_t fscache_n_object_dead;
44013
44014 -atomic_t fscache_n_checkaux_none;
44015 -atomic_t fscache_n_checkaux_okay;
44016 -atomic_t fscache_n_checkaux_update;
44017 -atomic_t fscache_n_checkaux_obsolete;
44018 +atomic_unchecked_t fscache_n_checkaux_none;
44019 +atomic_unchecked_t fscache_n_checkaux_okay;
44020 +atomic_unchecked_t fscache_n_checkaux_update;
44021 +atomic_unchecked_t fscache_n_checkaux_obsolete;
44022
44023 atomic_t fscache_n_cop_alloc_object;
44024 atomic_t fscache_n_cop_lookup_object;
44025 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
44026 seq_puts(m, "FS-Cache statistics\n");
44027
44028 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
44029 - atomic_read(&fscache_n_cookie_index),
44030 - atomic_read(&fscache_n_cookie_data),
44031 - atomic_read(&fscache_n_cookie_special));
44032 + atomic_read_unchecked(&fscache_n_cookie_index),
44033 + atomic_read_unchecked(&fscache_n_cookie_data),
44034 + atomic_read_unchecked(&fscache_n_cookie_special));
44035
44036 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
44037 - atomic_read(&fscache_n_object_alloc),
44038 - atomic_read(&fscache_n_object_no_alloc),
44039 - atomic_read(&fscache_n_object_avail),
44040 - atomic_read(&fscache_n_object_dead));
44041 + atomic_read_unchecked(&fscache_n_object_alloc),
44042 + atomic_read_unchecked(&fscache_n_object_no_alloc),
44043 + atomic_read_unchecked(&fscache_n_object_avail),
44044 + atomic_read_unchecked(&fscache_n_object_dead));
44045 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
44046 - atomic_read(&fscache_n_checkaux_none),
44047 - atomic_read(&fscache_n_checkaux_okay),
44048 - atomic_read(&fscache_n_checkaux_update),
44049 - atomic_read(&fscache_n_checkaux_obsolete));
44050 + atomic_read_unchecked(&fscache_n_checkaux_none),
44051 + atomic_read_unchecked(&fscache_n_checkaux_okay),
44052 + atomic_read_unchecked(&fscache_n_checkaux_update),
44053 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
44054
44055 seq_printf(m, "Pages : mrk=%u unc=%u\n",
44056 - atomic_read(&fscache_n_marks),
44057 - atomic_read(&fscache_n_uncaches));
44058 + atomic_read_unchecked(&fscache_n_marks),
44059 + atomic_read_unchecked(&fscache_n_uncaches));
44060
44061 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
44062 " oom=%u\n",
44063 - atomic_read(&fscache_n_acquires),
44064 - atomic_read(&fscache_n_acquires_null),
44065 - atomic_read(&fscache_n_acquires_no_cache),
44066 - atomic_read(&fscache_n_acquires_ok),
44067 - atomic_read(&fscache_n_acquires_nobufs),
44068 - atomic_read(&fscache_n_acquires_oom));
44069 + atomic_read_unchecked(&fscache_n_acquires),
44070 + atomic_read_unchecked(&fscache_n_acquires_null),
44071 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
44072 + atomic_read_unchecked(&fscache_n_acquires_ok),
44073 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
44074 + atomic_read_unchecked(&fscache_n_acquires_oom));
44075
44076 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
44077 - atomic_read(&fscache_n_object_lookups),
44078 - atomic_read(&fscache_n_object_lookups_negative),
44079 - atomic_read(&fscache_n_object_lookups_positive),
44080 - atomic_read(&fscache_n_object_created),
44081 - atomic_read(&fscache_n_object_lookups_timed_out));
44082 + atomic_read_unchecked(&fscache_n_object_lookups),
44083 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
44084 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
44085 + atomic_read_unchecked(&fscache_n_object_created),
44086 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
44087
44088 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
44089 - atomic_read(&fscache_n_updates),
44090 - atomic_read(&fscache_n_updates_null),
44091 - atomic_read(&fscache_n_updates_run));
44092 + atomic_read_unchecked(&fscache_n_updates),
44093 + atomic_read_unchecked(&fscache_n_updates_null),
44094 + atomic_read_unchecked(&fscache_n_updates_run));
44095
44096 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
44097 - atomic_read(&fscache_n_relinquishes),
44098 - atomic_read(&fscache_n_relinquishes_null),
44099 - atomic_read(&fscache_n_relinquishes_waitcrt),
44100 - atomic_read(&fscache_n_relinquishes_retire));
44101 + atomic_read_unchecked(&fscache_n_relinquishes),
44102 + atomic_read_unchecked(&fscache_n_relinquishes_null),
44103 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
44104 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
44105
44106 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
44107 - atomic_read(&fscache_n_attr_changed),
44108 - atomic_read(&fscache_n_attr_changed_ok),
44109 - atomic_read(&fscache_n_attr_changed_nobufs),
44110 - atomic_read(&fscache_n_attr_changed_nomem),
44111 - atomic_read(&fscache_n_attr_changed_calls));
44112 + atomic_read_unchecked(&fscache_n_attr_changed),
44113 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
44114 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
44115 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
44116 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
44117
44118 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
44119 - atomic_read(&fscache_n_allocs),
44120 - atomic_read(&fscache_n_allocs_ok),
44121 - atomic_read(&fscache_n_allocs_wait),
44122 - atomic_read(&fscache_n_allocs_nobufs),
44123 - atomic_read(&fscache_n_allocs_intr));
44124 + atomic_read_unchecked(&fscache_n_allocs),
44125 + atomic_read_unchecked(&fscache_n_allocs_ok),
44126 + atomic_read_unchecked(&fscache_n_allocs_wait),
44127 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
44128 + atomic_read_unchecked(&fscache_n_allocs_intr));
44129 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
44130 - atomic_read(&fscache_n_alloc_ops),
44131 - atomic_read(&fscache_n_alloc_op_waits),
44132 - atomic_read(&fscache_n_allocs_object_dead));
44133 + atomic_read_unchecked(&fscache_n_alloc_ops),
44134 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
44135 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
44136
44137 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
44138 " int=%u oom=%u\n",
44139 - atomic_read(&fscache_n_retrievals),
44140 - atomic_read(&fscache_n_retrievals_ok),
44141 - atomic_read(&fscache_n_retrievals_wait),
44142 - atomic_read(&fscache_n_retrievals_nodata),
44143 - atomic_read(&fscache_n_retrievals_nobufs),
44144 - atomic_read(&fscache_n_retrievals_intr),
44145 - atomic_read(&fscache_n_retrievals_nomem));
44146 + atomic_read_unchecked(&fscache_n_retrievals),
44147 + atomic_read_unchecked(&fscache_n_retrievals_ok),
44148 + atomic_read_unchecked(&fscache_n_retrievals_wait),
44149 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
44150 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
44151 + atomic_read_unchecked(&fscache_n_retrievals_intr),
44152 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
44153 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
44154 - atomic_read(&fscache_n_retrieval_ops),
44155 - atomic_read(&fscache_n_retrieval_op_waits),
44156 - atomic_read(&fscache_n_retrievals_object_dead));
44157 + atomic_read_unchecked(&fscache_n_retrieval_ops),
44158 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
44159 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
44160
44161 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
44162 - atomic_read(&fscache_n_stores),
44163 - atomic_read(&fscache_n_stores_ok),
44164 - atomic_read(&fscache_n_stores_again),
44165 - atomic_read(&fscache_n_stores_nobufs),
44166 - atomic_read(&fscache_n_stores_oom));
44167 + atomic_read_unchecked(&fscache_n_stores),
44168 + atomic_read_unchecked(&fscache_n_stores_ok),
44169 + atomic_read_unchecked(&fscache_n_stores_again),
44170 + atomic_read_unchecked(&fscache_n_stores_nobufs),
44171 + atomic_read_unchecked(&fscache_n_stores_oom));
44172 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
44173 - atomic_read(&fscache_n_store_ops),
44174 - atomic_read(&fscache_n_store_calls),
44175 - atomic_read(&fscache_n_store_pages),
44176 - atomic_read(&fscache_n_store_radix_deletes),
44177 - atomic_read(&fscache_n_store_pages_over_limit));
44178 + atomic_read_unchecked(&fscache_n_store_ops),
44179 + atomic_read_unchecked(&fscache_n_store_calls),
44180 + atomic_read_unchecked(&fscache_n_store_pages),
44181 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
44182 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
44183
44184 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
44185 - atomic_read(&fscache_n_store_vmscan_not_storing),
44186 - atomic_read(&fscache_n_store_vmscan_gone),
44187 - atomic_read(&fscache_n_store_vmscan_busy),
44188 - atomic_read(&fscache_n_store_vmscan_cancelled));
44189 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
44190 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
44191 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
44192 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
44193
44194 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
44195 - atomic_read(&fscache_n_op_pend),
44196 - atomic_read(&fscache_n_op_run),
44197 - atomic_read(&fscache_n_op_enqueue),
44198 - atomic_read(&fscache_n_op_cancelled),
44199 - atomic_read(&fscache_n_op_rejected));
44200 + atomic_read_unchecked(&fscache_n_op_pend),
44201 + atomic_read_unchecked(&fscache_n_op_run),
44202 + atomic_read_unchecked(&fscache_n_op_enqueue),
44203 + atomic_read_unchecked(&fscache_n_op_cancelled),
44204 + atomic_read_unchecked(&fscache_n_op_rejected));
44205 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
44206 - atomic_read(&fscache_n_op_deferred_release),
44207 - atomic_read(&fscache_n_op_release),
44208 - atomic_read(&fscache_n_op_gc));
44209 + atomic_read_unchecked(&fscache_n_op_deferred_release),
44210 + atomic_read_unchecked(&fscache_n_op_release),
44211 + atomic_read_unchecked(&fscache_n_op_gc));
44212
44213 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
44214 atomic_read(&fscache_n_cop_alloc_object),
44215 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
44216 index 3426521..3b75162 100644
44217 --- a/fs/fuse/cuse.c
44218 +++ b/fs/fuse/cuse.c
44219 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
44220 INIT_LIST_HEAD(&cuse_conntbl[i]);
44221
44222 /* inherit and extend fuse_dev_operations */
44223 - cuse_channel_fops = fuse_dev_operations;
44224 - cuse_channel_fops.owner = THIS_MODULE;
44225 - cuse_channel_fops.open = cuse_channel_open;
44226 - cuse_channel_fops.release = cuse_channel_release;
44227 + pax_open_kernel();
44228 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
44229 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
44230 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
44231 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
44232 + pax_close_kernel();
44233
44234 cuse_class = class_create(THIS_MODULE, "cuse");
44235 if (IS_ERR(cuse_class))
44236 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
44237 index 2aaf3ea..8e50863 100644
44238 --- a/fs/fuse/dev.c
44239 +++ b/fs/fuse/dev.c
44240 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
44241 ret = 0;
44242 pipe_lock(pipe);
44243
44244 - if (!pipe->readers) {
44245 + if (!atomic_read(&pipe->readers)) {
44246 send_sig(SIGPIPE, current, 0);
44247 if (!ret)
44248 ret = -EPIPE;
44249 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
44250 index 9f63e49..d8a64c0 100644
44251 --- a/fs/fuse/dir.c
44252 +++ b/fs/fuse/dir.c
44253 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
44254 return link;
44255 }
44256
44257 -static void free_link(char *link)
44258 +static void free_link(const char *link)
44259 {
44260 if (!IS_ERR(link))
44261 free_page((unsigned long) link);
44262 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
44263 index cfd4959..a780959 100644
44264 --- a/fs/gfs2/inode.c
44265 +++ b/fs/gfs2/inode.c
44266 @@ -1490,7 +1490,7 @@ out:
44267
44268 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44269 {
44270 - char *s = nd_get_link(nd);
44271 + const char *s = nd_get_link(nd);
44272 if (!IS_ERR(s))
44273 kfree(s);
44274 }
44275 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
44276 index 0be5a78..9cfb853 100644
44277 --- a/fs/hugetlbfs/inode.c
44278 +++ b/fs/hugetlbfs/inode.c
44279 @@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
44280 .kill_sb = kill_litter_super,
44281 };
44282
44283 -static struct vfsmount *hugetlbfs_vfsmount;
44284 +struct vfsmount *hugetlbfs_vfsmount;
44285
44286 static int can_do_hugetlb_shm(void)
44287 {
44288 diff --git a/fs/inode.c b/fs/inode.c
44289 index ee4e66b..0451521 100644
44290 --- a/fs/inode.c
44291 +++ b/fs/inode.c
44292 @@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
44293
44294 #ifdef CONFIG_SMP
44295 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
44296 - static atomic_t shared_last_ino;
44297 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
44298 + static atomic_unchecked_t shared_last_ino;
44299 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
44300
44301 res = next - LAST_INO_BATCH;
44302 }
44303 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
44304 index e513f19..2ab1351 100644
44305 --- a/fs/jffs2/erase.c
44306 +++ b/fs/jffs2/erase.c
44307 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
44308 struct jffs2_unknown_node marker = {
44309 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44310 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44311 - .totlen = cpu_to_je32(c->cleanmarker_size)
44312 + .totlen = cpu_to_je32(c->cleanmarker_size),
44313 + .hdr_crc = cpu_to_je32(0)
44314 };
44315
44316 jffs2_prealloc_raw_node_refs(c, jeb, 1);
44317 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
44318 index b09e51d..e482afa 100644
44319 --- a/fs/jffs2/wbuf.c
44320 +++ b/fs/jffs2/wbuf.c
44321 @@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
44322 {
44323 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44324 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44325 - .totlen = constant_cpu_to_je32(8)
44326 + .totlen = constant_cpu_to_je32(8),
44327 + .hdr_crc = constant_cpu_to_je32(0)
44328 };
44329
44330 /*
44331 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
44332 index a44eff0..462e07d 100644
44333 --- a/fs/jfs/super.c
44334 +++ b/fs/jfs/super.c
44335 @@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
44336
44337 jfs_inode_cachep =
44338 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
44339 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
44340 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
44341 init_once);
44342 if (jfs_inode_cachep == NULL)
44343 return -ENOMEM;
44344 diff --git a/fs/libfs.c b/fs/libfs.c
44345 index f6d411e..e82a08d 100644
44346 --- a/fs/libfs.c
44347 +++ b/fs/libfs.c
44348 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44349
44350 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
44351 struct dentry *next;
44352 + char d_name[sizeof(next->d_iname)];
44353 + const unsigned char *name;
44354 +
44355 next = list_entry(p, struct dentry, d_u.d_child);
44356 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
44357 if (!simple_positive(next)) {
44358 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44359
44360 spin_unlock(&next->d_lock);
44361 spin_unlock(&dentry->d_lock);
44362 - if (filldir(dirent, next->d_name.name,
44363 + name = next->d_name.name;
44364 + if (name == next->d_iname) {
44365 + memcpy(d_name, name, next->d_name.len);
44366 + name = d_name;
44367 + }
44368 + if (filldir(dirent, name,
44369 next->d_name.len, filp->f_pos,
44370 next->d_inode->i_ino,
44371 dt_type(next->d_inode)) < 0)
44372 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
44373 index 8392cb8..80d6193 100644
44374 --- a/fs/lockd/clntproc.c
44375 +++ b/fs/lockd/clntproc.c
44376 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
44377 /*
44378 * Cookie counter for NLM requests
44379 */
44380 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44381 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44382
44383 void nlmclnt_next_cookie(struct nlm_cookie *c)
44384 {
44385 - u32 cookie = atomic_inc_return(&nlm_cookie);
44386 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44387
44388 memcpy(c->data, &cookie, 4);
44389 c->len=4;
44390 diff --git a/fs/locks.c b/fs/locks.c
44391 index 637694b..f84a121 100644
44392 --- a/fs/locks.c
44393 +++ b/fs/locks.c
44394 @@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
44395 return;
44396
44397 if (filp->f_op && filp->f_op->flock) {
44398 - struct file_lock fl = {
44399 + struct file_lock flock = {
44400 .fl_pid = current->tgid,
44401 .fl_file = filp,
44402 .fl_flags = FL_FLOCK,
44403 .fl_type = F_UNLCK,
44404 .fl_end = OFFSET_MAX,
44405 };
44406 - filp->f_op->flock(filp, F_SETLKW, &fl);
44407 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
44408 - fl.fl_ops->fl_release_private(&fl);
44409 + filp->f_op->flock(filp, F_SETLKW, &flock);
44410 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
44411 + flock.fl_ops->fl_release_private(&flock);
44412 }
44413
44414 lock_flocks();
44415 diff --git a/fs/namei.c b/fs/namei.c
44416 index 744e942..24ef47f 100644
44417 --- a/fs/namei.c
44418 +++ b/fs/namei.c
44419 @@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
44420 if (ret != -EACCES)
44421 return ret;
44422
44423 +#ifdef CONFIG_GRKERNSEC
44424 + /* we'll block if we have to log due to a denied capability use */
44425 + if (mask & MAY_NOT_BLOCK)
44426 + return -ECHILD;
44427 +#endif
44428 +
44429 if (S_ISDIR(inode->i_mode)) {
44430 /* DACs are overridable for directories */
44431 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44432 - return 0;
44433 if (!(mask & MAY_WRITE))
44434 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44435 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44436 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44437 return 0;
44438 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44439 + return 0;
44440 return -EACCES;
44441 }
44442 /*
44443 + * Searching includes executable on directories, else just read.
44444 + */
44445 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44446 + if (mask == MAY_READ)
44447 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44448 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44449 + return 0;
44450 +
44451 + /*
44452 * Read/write DACs are always overridable.
44453 * Executable DACs are overridable when there is
44454 * at least one exec bit set.
44455 @@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
44456 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44457 return 0;
44458
44459 - /*
44460 - * Searching includes executable on directories, else just read.
44461 - */
44462 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44463 - if (mask == MAY_READ)
44464 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44465 - return 0;
44466 -
44467 return -EACCES;
44468 }
44469
44470 @@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
44471 return error;
44472 }
44473
44474 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
44475 + dentry->d_inode, dentry, nd->path.mnt)) {
44476 + error = -EACCES;
44477 + *p = ERR_PTR(error); /* no ->put_link(), please */
44478 + path_put(&nd->path);
44479 + return error;
44480 + }
44481 +
44482 nd->last_type = LAST_BIND;
44483 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44484 error = PTR_ERR(*p);
44485 if (!IS_ERR(*p)) {
44486 - char *s = nd_get_link(nd);
44487 + const char *s = nd_get_link(nd);
44488 error = 0;
44489 if (s)
44490 error = __vfs_follow_link(nd, s);
44491 @@ -1624,6 +1640,21 @@ static int path_lookupat(int dfd, const char *name,
44492 if (!err)
44493 err = complete_walk(nd);
44494
44495 + if (!(nd->flags & LOOKUP_PARENT)) {
44496 +#ifdef CONFIG_GRKERNSEC
44497 + if (flags & LOOKUP_RCU) {
44498 + if (!err)
44499 + path_put(&nd->path);
44500 + err = -ECHILD;
44501 + } else
44502 +#endif
44503 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44504 + if (!err)
44505 + path_put(&nd->path);
44506 + err = -ENOENT;
44507 + }
44508 + }
44509 +
44510 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44511 if (!nd->inode->i_op->lookup) {
44512 path_put(&nd->path);
44513 @@ -1651,6 +1682,15 @@ static int do_path_lookup(int dfd, const char *name,
44514 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44515
44516 if (likely(!retval)) {
44517 + if (*name != '/' && nd->path.dentry && nd->inode) {
44518 +#ifdef CONFIG_GRKERNSEC
44519 + if (flags & LOOKUP_RCU)
44520 + return -ECHILD;
44521 +#endif
44522 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44523 + return -ENOENT;
44524 + }
44525 +
44526 if (unlikely(!audit_dummy_context())) {
44527 if (nd->path.dentry && nd->inode)
44528 audit_inode(name, nd->path.dentry);
44529 @@ -2048,6 +2088,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
44530 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
44531 return -EPERM;
44532
44533 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
44534 + return -EPERM;
44535 + if (gr_handle_rawio(inode))
44536 + return -EPERM;
44537 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
44538 + return -EACCES;
44539 +
44540 return 0;
44541 }
44542
44543 @@ -2109,6 +2156,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44544 error = complete_walk(nd);
44545 if (error)
44546 return ERR_PTR(error);
44547 +#ifdef CONFIG_GRKERNSEC
44548 + if (nd->flags & LOOKUP_RCU) {
44549 + error = -ECHILD;
44550 + goto exit;
44551 + }
44552 +#endif
44553 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44554 + error = -ENOENT;
44555 + goto exit;
44556 + }
44557 audit_inode(pathname, nd->path.dentry);
44558 if (open_flag & O_CREAT) {
44559 error = -EISDIR;
44560 @@ -2119,6 +2176,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44561 error = complete_walk(nd);
44562 if (error)
44563 return ERR_PTR(error);
44564 +#ifdef CONFIG_GRKERNSEC
44565 + if (nd->flags & LOOKUP_RCU) {
44566 + error = -ECHILD;
44567 + goto exit;
44568 + }
44569 +#endif
44570 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44571 + error = -ENOENT;
44572 + goto exit;
44573 + }
44574 audit_inode(pathname, dir);
44575 goto ok;
44576 }
44577 @@ -2140,6 +2207,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44578 error = complete_walk(nd);
44579 if (error)
44580 return ERR_PTR(-ECHILD);
44581 +#ifdef CONFIG_GRKERNSEC
44582 + if (nd->flags & LOOKUP_RCU) {
44583 + error = -ECHILD;
44584 + goto exit;
44585 + }
44586 +#endif
44587 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44588 + error = -ENOENT;
44589 + goto exit;
44590 + }
44591
44592 error = -ENOTDIR;
44593 if (nd->flags & LOOKUP_DIRECTORY) {
44594 @@ -2180,6 +2257,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44595 /* Negative dentry, just create the file */
44596 if (!dentry->d_inode) {
44597 int mode = op->mode;
44598 +
44599 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44600 + error = -EACCES;
44601 + goto exit_mutex_unlock;
44602 + }
44603 +
44604 if (!IS_POSIXACL(dir->d_inode))
44605 mode &= ~current_umask();
44606 /*
44607 @@ -2203,6 +2286,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44608 error = vfs_create(dir->d_inode, dentry, mode, nd);
44609 if (error)
44610 goto exit_mutex_unlock;
44611 + else
44612 + gr_handle_create(path->dentry, path->mnt);
44613 mutex_unlock(&dir->d_inode->i_mutex);
44614 dput(nd->path.dentry);
44615 nd->path.dentry = dentry;
44616 @@ -2212,6 +2297,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44617 /*
44618 * It already exists.
44619 */
44620 +
44621 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44622 + error = -ENOENT;
44623 + goto exit_mutex_unlock;
44624 + }
44625 +
44626 + /* only check if O_CREAT is specified, all other checks need to go
44627 + into may_open */
44628 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44629 + error = -EACCES;
44630 + goto exit_mutex_unlock;
44631 + }
44632 +
44633 mutex_unlock(&dir->d_inode->i_mutex);
44634 audit_inode(pathname, path->dentry);
44635
44636 @@ -2424,6 +2522,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
44637 *path = nd.path;
44638 return dentry;
44639 eexist:
44640 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44641 + dput(dentry);
44642 + dentry = ERR_PTR(-ENOENT);
44643 + goto fail;
44644 + }
44645 dput(dentry);
44646 dentry = ERR_PTR(-EEXIST);
44647 fail:
44648 @@ -2446,6 +2549,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
44649 }
44650 EXPORT_SYMBOL(user_path_create);
44651
44652 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44653 +{
44654 + char *tmp = getname(pathname);
44655 + struct dentry *res;
44656 + if (IS_ERR(tmp))
44657 + return ERR_CAST(tmp);
44658 + res = kern_path_create(dfd, tmp, path, is_dir);
44659 + if (IS_ERR(res))
44660 + putname(tmp);
44661 + else
44662 + *to = tmp;
44663 + return res;
44664 +}
44665 +
44666 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44667 {
44668 int error = may_create(dir, dentry);
44669 @@ -2513,6 +2630,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44670 error = mnt_want_write(path.mnt);
44671 if (error)
44672 goto out_dput;
44673 +
44674 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44675 + error = -EPERM;
44676 + goto out_drop_write;
44677 + }
44678 +
44679 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44680 + error = -EACCES;
44681 + goto out_drop_write;
44682 + }
44683 +
44684 error = security_path_mknod(&path, dentry, mode, dev);
44685 if (error)
44686 goto out_drop_write;
44687 @@ -2530,6 +2658,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44688 }
44689 out_drop_write:
44690 mnt_drop_write(path.mnt);
44691 +
44692 + if (!error)
44693 + gr_handle_create(dentry, path.mnt);
44694 out_dput:
44695 dput(dentry);
44696 mutex_unlock(&path.dentry->d_inode->i_mutex);
44697 @@ -2579,12 +2710,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
44698 error = mnt_want_write(path.mnt);
44699 if (error)
44700 goto out_dput;
44701 +
44702 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44703 + error = -EACCES;
44704 + goto out_drop_write;
44705 + }
44706 +
44707 error = security_path_mkdir(&path, dentry, mode);
44708 if (error)
44709 goto out_drop_write;
44710 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44711 out_drop_write:
44712 mnt_drop_write(path.mnt);
44713 +
44714 + if (!error)
44715 + gr_handle_create(dentry, path.mnt);
44716 out_dput:
44717 dput(dentry);
44718 mutex_unlock(&path.dentry->d_inode->i_mutex);
44719 @@ -2664,6 +2804,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44720 char * name;
44721 struct dentry *dentry;
44722 struct nameidata nd;
44723 + ino_t saved_ino = 0;
44724 + dev_t saved_dev = 0;
44725
44726 error = user_path_parent(dfd, pathname, &nd, &name);
44727 if (error)
44728 @@ -2692,6 +2834,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
44729 error = -ENOENT;
44730 goto exit3;
44731 }
44732 +
44733 + saved_ino = dentry->d_inode->i_ino;
44734 + saved_dev = gr_get_dev_from_dentry(dentry);
44735 +
44736 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44737 + error = -EACCES;
44738 + goto exit3;
44739 + }
44740 +
44741 error = mnt_want_write(nd.path.mnt);
44742 if (error)
44743 goto exit3;
44744 @@ -2699,6 +2850,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44745 if (error)
44746 goto exit4;
44747 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44748 + if (!error && (saved_dev || saved_ino))
44749 + gr_handle_delete(saved_ino, saved_dev);
44750 exit4:
44751 mnt_drop_write(nd.path.mnt);
44752 exit3:
44753 @@ -2761,6 +2914,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44754 struct dentry *dentry;
44755 struct nameidata nd;
44756 struct inode *inode = NULL;
44757 + ino_t saved_ino = 0;
44758 + dev_t saved_dev = 0;
44759
44760 error = user_path_parent(dfd, pathname, &nd, &name);
44761 if (error)
44762 @@ -2783,6 +2938,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44763 if (!inode)
44764 goto slashes;
44765 ihold(inode);
44766 +
44767 + if (inode->i_nlink <= 1) {
44768 + saved_ino = inode->i_ino;
44769 + saved_dev = gr_get_dev_from_dentry(dentry);
44770 + }
44771 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44772 + error = -EACCES;
44773 + goto exit2;
44774 + }
44775 +
44776 error = mnt_want_write(nd.path.mnt);
44777 if (error)
44778 goto exit2;
44779 @@ -2790,6 +2955,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44780 if (error)
44781 goto exit3;
44782 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44783 + if (!error && (saved_ino || saved_dev))
44784 + gr_handle_delete(saved_ino, saved_dev);
44785 exit3:
44786 mnt_drop_write(nd.path.mnt);
44787 exit2:
44788 @@ -2865,10 +3032,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
44789 error = mnt_want_write(path.mnt);
44790 if (error)
44791 goto out_dput;
44792 +
44793 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44794 + error = -EACCES;
44795 + goto out_drop_write;
44796 + }
44797 +
44798 error = security_path_symlink(&path, dentry, from);
44799 if (error)
44800 goto out_drop_write;
44801 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44802 + if (!error)
44803 + gr_handle_create(dentry, path.mnt);
44804 out_drop_write:
44805 mnt_drop_write(path.mnt);
44806 out_dput:
44807 @@ -2940,6 +3115,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44808 {
44809 struct dentry *new_dentry;
44810 struct path old_path, new_path;
44811 + char *to = NULL;
44812 int how = 0;
44813 int error;
44814
44815 @@ -2963,7 +3139,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44816 if (error)
44817 return error;
44818
44819 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44820 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44821 error = PTR_ERR(new_dentry);
44822 if (IS_ERR(new_dentry))
44823 goto out;
44824 @@ -2974,13 +3150,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44825 error = mnt_want_write(new_path.mnt);
44826 if (error)
44827 goto out_dput;
44828 +
44829 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44830 + old_path.dentry->d_inode,
44831 + old_path.dentry->d_inode->i_mode, to)) {
44832 + error = -EACCES;
44833 + goto out_drop_write;
44834 + }
44835 +
44836 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44837 + old_path.dentry, old_path.mnt, to)) {
44838 + error = -EACCES;
44839 + goto out_drop_write;
44840 + }
44841 +
44842 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44843 if (error)
44844 goto out_drop_write;
44845 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44846 + if (!error)
44847 + gr_handle_create(new_dentry, new_path.mnt);
44848 out_drop_write:
44849 mnt_drop_write(new_path.mnt);
44850 out_dput:
44851 + putname(to);
44852 dput(new_dentry);
44853 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44854 path_put(&new_path);
44855 @@ -3208,6 +3401,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44856 if (new_dentry == trap)
44857 goto exit5;
44858
44859 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44860 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
44861 + to);
44862 + if (error)
44863 + goto exit5;
44864 +
44865 error = mnt_want_write(oldnd.path.mnt);
44866 if (error)
44867 goto exit5;
44868 @@ -3217,6 +3416,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44869 goto exit6;
44870 error = vfs_rename(old_dir->d_inode, old_dentry,
44871 new_dir->d_inode, new_dentry);
44872 + if (!error)
44873 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44874 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44875 exit6:
44876 mnt_drop_write(oldnd.path.mnt);
44877 exit5:
44878 @@ -3242,6 +3444,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
44879
44880 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44881 {
44882 + char tmpbuf[64];
44883 + const char *newlink;
44884 int len;
44885
44886 len = PTR_ERR(link);
44887 @@ -3251,7 +3455,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
44888 len = strlen(link);
44889 if (len > (unsigned) buflen)
44890 len = buflen;
44891 - if (copy_to_user(buffer, link, len))
44892 +
44893 + if (len < sizeof(tmpbuf)) {
44894 + memcpy(tmpbuf, link, len);
44895 + newlink = tmpbuf;
44896 + } else
44897 + newlink = link;
44898 +
44899 + if (copy_to_user(buffer, newlink, len))
44900 len = -EFAULT;
44901 out:
44902 return len;
44903 diff --git a/fs/namespace.c b/fs/namespace.c
44904 index cfc6d44..b4632a5 100644
44905 --- a/fs/namespace.c
44906 +++ b/fs/namespace.c
44907 @@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44908 if (!(sb->s_flags & MS_RDONLY))
44909 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44910 up_write(&sb->s_umount);
44911 +
44912 + gr_log_remount(mnt->mnt_devname, retval);
44913 +
44914 return retval;
44915 }
44916
44917 @@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44918 br_write_unlock(vfsmount_lock);
44919 up_write(&namespace_sem);
44920 release_mounts(&umount_list);
44921 +
44922 + gr_log_unmount(mnt->mnt_devname, retval);
44923 +
44924 return retval;
44925 }
44926
44927 @@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44928 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44929 MS_STRICTATIME);
44930
44931 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44932 + retval = -EPERM;
44933 + goto dput_out;
44934 + }
44935 +
44936 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44937 + retval = -EPERM;
44938 + goto dput_out;
44939 + }
44940 +
44941 if (flags & MS_REMOUNT)
44942 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44943 data_page);
44944 @@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44945 dev_name, data_page);
44946 dput_out:
44947 path_put(&path);
44948 +
44949 + gr_log_mount(dev_name, dir_name, retval);
44950 +
44951 return retval;
44952 }
44953
44954 @@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
44955 if (error)
44956 goto out2;
44957
44958 + if (gr_handle_chroot_pivot()) {
44959 + error = -EPERM;
44960 + goto out2;
44961 + }
44962 +
44963 get_fs_root(current->fs, &root);
44964 error = lock_mount(&old);
44965 if (error)
44966 diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
44967 index 3db6b82..a57597e 100644
44968 --- a/fs/nfs/blocklayout/blocklayout.c
44969 +++ b/fs/nfs/blocklayout/blocklayout.c
44970 @@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
44971 */
44972 struct parallel_io {
44973 struct kref refcnt;
44974 - struct rpc_call_ops call_ops;
44975 + rpc_call_ops_no_const call_ops;
44976 void (*pnfs_callback) (void *data);
44977 void *data;
44978 };
44979 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
44980 index 50a15fa..ca113f9 100644
44981 --- a/fs/nfs/inode.c
44982 +++ b/fs/nfs/inode.c
44983 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
44984 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44985 nfsi->attrtimeo_timestamp = jiffies;
44986
44987 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44988 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44989 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44990 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44991 else
44992 @@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
44993 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44994 }
44995
44996 -static atomic_long_t nfs_attr_generation_counter;
44997 +static atomic_long_unchecked_t nfs_attr_generation_counter;
44998
44999 static unsigned long nfs_read_attr_generation_counter(void)
45000 {
45001 - return atomic_long_read(&nfs_attr_generation_counter);
45002 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
45003 }
45004
45005 unsigned long nfs_inc_attr_generation_counter(void)
45006 {
45007 - return atomic_long_inc_return(&nfs_attr_generation_counter);
45008 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
45009 }
45010
45011 void nfs_fattr_init(struct nfs_fattr *fattr)
45012 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
45013 index 7a2e442..8e544cc 100644
45014 --- a/fs/nfsd/vfs.c
45015 +++ b/fs/nfsd/vfs.c
45016 @@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
45017 } else {
45018 oldfs = get_fs();
45019 set_fs(KERNEL_DS);
45020 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
45021 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
45022 set_fs(oldfs);
45023 }
45024
45025 @@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
45026
45027 /* Write the data. */
45028 oldfs = get_fs(); set_fs(KERNEL_DS);
45029 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
45030 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
45031 set_fs(oldfs);
45032 if (host_err < 0)
45033 goto out_nfserr;
45034 @@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
45035 */
45036
45037 oldfs = get_fs(); set_fs(KERNEL_DS);
45038 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
45039 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
45040 set_fs(oldfs);
45041
45042 if (host_err < 0)
45043 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
45044 index 9fde1c0..14e8827 100644
45045 --- a/fs/notify/fanotify/fanotify_user.c
45046 +++ b/fs/notify/fanotify/fanotify_user.c
45047 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
45048 goto out_close_fd;
45049
45050 ret = -EFAULT;
45051 - if (copy_to_user(buf, &fanotify_event_metadata,
45052 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
45053 + copy_to_user(buf, &fanotify_event_metadata,
45054 fanotify_event_metadata.event_len))
45055 goto out_kill_access_response;
45056
45057 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
45058 index ee18815..7aa5d01 100644
45059 --- a/fs/notify/notification.c
45060 +++ b/fs/notify/notification.c
45061 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
45062 * get set to 0 so it will never get 'freed'
45063 */
45064 static struct fsnotify_event *q_overflow_event;
45065 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45066 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45067
45068 /**
45069 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
45070 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45071 */
45072 u32 fsnotify_get_cookie(void)
45073 {
45074 - return atomic_inc_return(&fsnotify_sync_cookie);
45075 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
45076 }
45077 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
45078
45079 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
45080 index 99e3610..02c1068 100644
45081 --- a/fs/ntfs/dir.c
45082 +++ b/fs/ntfs/dir.c
45083 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
45084 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
45085 ~(s64)(ndir->itype.index.block_size - 1)));
45086 /* Bounds checks. */
45087 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45088 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45089 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
45090 "inode 0x%lx or driver bug.", vdir->i_ino);
45091 goto err_out;
45092 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
45093 index c587e2d..3641eaa 100644
45094 --- a/fs/ntfs/file.c
45095 +++ b/fs/ntfs/file.c
45096 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
45097 #endif /* NTFS_RW */
45098 };
45099
45100 -const struct file_operations ntfs_empty_file_ops = {};
45101 +const struct file_operations ntfs_empty_file_ops __read_only;
45102
45103 -const struct inode_operations ntfs_empty_inode_ops = {};
45104 +const struct inode_operations ntfs_empty_inode_ops __read_only;
45105 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
45106 index 210c352..a174f83 100644
45107 --- a/fs/ocfs2/localalloc.c
45108 +++ b/fs/ocfs2/localalloc.c
45109 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
45110 goto bail;
45111 }
45112
45113 - atomic_inc(&osb->alloc_stats.moves);
45114 + atomic_inc_unchecked(&osb->alloc_stats.moves);
45115
45116 bail:
45117 if (handle)
45118 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
45119 index d355e6e..578d905 100644
45120 --- a/fs/ocfs2/ocfs2.h
45121 +++ b/fs/ocfs2/ocfs2.h
45122 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
45123
45124 struct ocfs2_alloc_stats
45125 {
45126 - atomic_t moves;
45127 - atomic_t local_data;
45128 - atomic_t bitmap_data;
45129 - atomic_t bg_allocs;
45130 - atomic_t bg_extends;
45131 + atomic_unchecked_t moves;
45132 + atomic_unchecked_t local_data;
45133 + atomic_unchecked_t bitmap_data;
45134 + atomic_unchecked_t bg_allocs;
45135 + atomic_unchecked_t bg_extends;
45136 };
45137
45138 enum ocfs2_local_alloc_state
45139 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
45140 index ba5d97e..c77db25 100644
45141 --- a/fs/ocfs2/suballoc.c
45142 +++ b/fs/ocfs2/suballoc.c
45143 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
45144 mlog_errno(status);
45145 goto bail;
45146 }
45147 - atomic_inc(&osb->alloc_stats.bg_extends);
45148 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
45149
45150 /* You should never ask for this much metadata */
45151 BUG_ON(bits_wanted >
45152 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
45153 mlog_errno(status);
45154 goto bail;
45155 }
45156 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45157 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45158
45159 *suballoc_loc = res.sr_bg_blkno;
45160 *suballoc_bit_start = res.sr_bit_offset;
45161 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
45162 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
45163 res->sr_bits);
45164
45165 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45166 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45167
45168 BUG_ON(res->sr_bits != 1);
45169
45170 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
45171 mlog_errno(status);
45172 goto bail;
45173 }
45174 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45175 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45176
45177 BUG_ON(res.sr_bits != 1);
45178
45179 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45180 cluster_start,
45181 num_clusters);
45182 if (!status)
45183 - atomic_inc(&osb->alloc_stats.local_data);
45184 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
45185 } else {
45186 if (min_clusters > (osb->bitmap_cpg - 1)) {
45187 /* The only paths asking for contiguousness
45188 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45189 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
45190 res.sr_bg_blkno,
45191 res.sr_bit_offset);
45192 - atomic_inc(&osb->alloc_stats.bitmap_data);
45193 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
45194 *num_clusters = res.sr_bits;
45195 }
45196 }
45197 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
45198 index 4994f8b..eaab8eb 100644
45199 --- a/fs/ocfs2/super.c
45200 +++ b/fs/ocfs2/super.c
45201 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
45202 "%10s => GlobalAllocs: %d LocalAllocs: %d "
45203 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
45204 "Stats",
45205 - atomic_read(&osb->alloc_stats.bitmap_data),
45206 - atomic_read(&osb->alloc_stats.local_data),
45207 - atomic_read(&osb->alloc_stats.bg_allocs),
45208 - atomic_read(&osb->alloc_stats.moves),
45209 - atomic_read(&osb->alloc_stats.bg_extends));
45210 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
45211 + atomic_read_unchecked(&osb->alloc_stats.local_data),
45212 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
45213 + atomic_read_unchecked(&osb->alloc_stats.moves),
45214 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
45215
45216 out += snprintf(buf + out, len - out,
45217 "%10s => State: %u Descriptor: %llu Size: %u bits "
45218 @@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
45219 spin_lock_init(&osb->osb_xattr_lock);
45220 ocfs2_init_steal_slots(osb);
45221
45222 - atomic_set(&osb->alloc_stats.moves, 0);
45223 - atomic_set(&osb->alloc_stats.local_data, 0);
45224 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
45225 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
45226 - atomic_set(&osb->alloc_stats.bg_extends, 0);
45227 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
45228 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
45229 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
45230 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
45231 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
45232
45233 /* Copy the blockcheck stats from the superblock probe */
45234 osb->osb_ecc_stats = *stats;
45235 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
45236 index 5d22872..523db20 100644
45237 --- a/fs/ocfs2/symlink.c
45238 +++ b/fs/ocfs2/symlink.c
45239 @@ -142,7 +142,7 @@ bail:
45240
45241 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
45242 {
45243 - char *link = nd_get_link(nd);
45244 + const char *link = nd_get_link(nd);
45245 if (!IS_ERR(link))
45246 kfree(link);
45247 }
45248 diff --git a/fs/open.c b/fs/open.c
45249 index 22c41b5..78894cf 100644
45250 --- a/fs/open.c
45251 +++ b/fs/open.c
45252 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
45253 error = locks_verify_truncate(inode, NULL, length);
45254 if (!error)
45255 error = security_path_truncate(&path);
45256 +
45257 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45258 + error = -EACCES;
45259 +
45260 if (!error)
45261 error = do_truncate(path.dentry, length, 0, NULL);
45262
45263 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
45264 if (__mnt_is_readonly(path.mnt))
45265 res = -EROFS;
45266
45267 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45268 + res = -EACCES;
45269 +
45270 out_path_release:
45271 path_put(&path);
45272 out:
45273 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
45274 if (error)
45275 goto dput_and_out;
45276
45277 + gr_log_chdir(path.dentry, path.mnt);
45278 +
45279 set_fs_pwd(current->fs, &path);
45280
45281 dput_and_out:
45282 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
45283 goto out_putf;
45284
45285 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
45286 +
45287 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45288 + error = -EPERM;
45289 +
45290 + if (!error)
45291 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45292 +
45293 if (!error)
45294 set_fs_pwd(current->fs, &file->f_path);
45295 out_putf:
45296 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
45297 if (error)
45298 goto dput_and_out;
45299
45300 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45301 + goto dput_and_out;
45302 +
45303 set_fs_root(current->fs, &path);
45304 +
45305 + gr_handle_chroot_chdir(&path);
45306 +
45307 error = 0;
45308 dput_and_out:
45309 path_put(&path);
45310 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
45311 if (error)
45312 return error;
45313 mutex_lock(&inode->i_mutex);
45314 +
45315 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
45316 + error = -EACCES;
45317 + goto out_unlock;
45318 + }
45319 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
45320 + error = -EACCES;
45321 + goto out_unlock;
45322 + }
45323 +
45324 error = security_path_chmod(path->dentry, path->mnt, mode);
45325 if (error)
45326 goto out_unlock;
45327 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
45328 int error;
45329 struct iattr newattrs;
45330
45331 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
45332 + return -EACCES;
45333 +
45334 newattrs.ia_valid = ATTR_CTIME;
45335 if (user != (uid_t) -1) {
45336 newattrs.ia_valid |= ATTR_UID;
45337 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
45338 index 6296b40..417c00f 100644
45339 --- a/fs/partitions/efi.c
45340 +++ b/fs/partitions/efi.c
45341 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
45342 if (!gpt)
45343 return NULL;
45344
45345 + if (!le32_to_cpu(gpt->num_partition_entries))
45346 + return NULL;
45347 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
45348 + if (!pte)
45349 + return NULL;
45350 +
45351 count = le32_to_cpu(gpt->num_partition_entries) *
45352 le32_to_cpu(gpt->sizeof_partition_entry);
45353 - if (!count)
45354 - return NULL;
45355 - pte = kzalloc(count, GFP_KERNEL);
45356 - if (!pte)
45357 - return NULL;
45358 -
45359 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
45360 (u8 *) pte,
45361 count) < count) {
45362 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
45363 index bd8ae78..539d250 100644
45364 --- a/fs/partitions/ldm.c
45365 +++ b/fs/partitions/ldm.c
45366 @@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
45367 goto found;
45368 }
45369
45370 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45371 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45372 if (!f) {
45373 ldm_crit ("Out of memory.");
45374 return false;
45375 diff --git a/fs/pipe.c b/fs/pipe.c
45376 index 4065f07..68c0706 100644
45377 --- a/fs/pipe.c
45378 +++ b/fs/pipe.c
45379 @@ -420,9 +420,9 @@ redo:
45380 }
45381 if (bufs) /* More to do? */
45382 continue;
45383 - if (!pipe->writers)
45384 + if (!atomic_read(&pipe->writers))
45385 break;
45386 - if (!pipe->waiting_writers) {
45387 + if (!atomic_read(&pipe->waiting_writers)) {
45388 /* syscall merging: Usually we must not sleep
45389 * if O_NONBLOCK is set, or if we got some data.
45390 * But if a writer sleeps in kernel space, then
45391 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
45392 mutex_lock(&inode->i_mutex);
45393 pipe = inode->i_pipe;
45394
45395 - if (!pipe->readers) {
45396 + if (!atomic_read(&pipe->readers)) {
45397 send_sig(SIGPIPE, current, 0);
45398 ret = -EPIPE;
45399 goto out;
45400 @@ -530,7 +530,7 @@ redo1:
45401 for (;;) {
45402 int bufs;
45403
45404 - if (!pipe->readers) {
45405 + if (!atomic_read(&pipe->readers)) {
45406 send_sig(SIGPIPE, current, 0);
45407 if (!ret)
45408 ret = -EPIPE;
45409 @@ -616,9 +616,9 @@ redo2:
45410 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45411 do_wakeup = 0;
45412 }
45413 - pipe->waiting_writers++;
45414 + atomic_inc(&pipe->waiting_writers);
45415 pipe_wait(pipe);
45416 - pipe->waiting_writers--;
45417 + atomic_dec(&pipe->waiting_writers);
45418 }
45419 out:
45420 mutex_unlock(&inode->i_mutex);
45421 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45422 mask = 0;
45423 if (filp->f_mode & FMODE_READ) {
45424 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45425 - if (!pipe->writers && filp->f_version != pipe->w_counter)
45426 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45427 mask |= POLLHUP;
45428 }
45429
45430 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45431 * Most Unices do not set POLLERR for FIFOs but on Linux they
45432 * behave exactly like pipes for poll().
45433 */
45434 - if (!pipe->readers)
45435 + if (!atomic_read(&pipe->readers))
45436 mask |= POLLERR;
45437 }
45438
45439 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
45440
45441 mutex_lock(&inode->i_mutex);
45442 pipe = inode->i_pipe;
45443 - pipe->readers -= decr;
45444 - pipe->writers -= decw;
45445 + atomic_sub(decr, &pipe->readers);
45446 + atomic_sub(decw, &pipe->writers);
45447
45448 - if (!pipe->readers && !pipe->writers) {
45449 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45450 free_pipe_info(inode);
45451 } else {
45452 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45453 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
45454
45455 if (inode->i_pipe) {
45456 ret = 0;
45457 - inode->i_pipe->readers++;
45458 + atomic_inc(&inode->i_pipe->readers);
45459 }
45460
45461 mutex_unlock(&inode->i_mutex);
45462 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
45463
45464 if (inode->i_pipe) {
45465 ret = 0;
45466 - inode->i_pipe->writers++;
45467 + atomic_inc(&inode->i_pipe->writers);
45468 }
45469
45470 mutex_unlock(&inode->i_mutex);
45471 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
45472 if (inode->i_pipe) {
45473 ret = 0;
45474 if (filp->f_mode & FMODE_READ)
45475 - inode->i_pipe->readers++;
45476 + atomic_inc(&inode->i_pipe->readers);
45477 if (filp->f_mode & FMODE_WRITE)
45478 - inode->i_pipe->writers++;
45479 + atomic_inc(&inode->i_pipe->writers);
45480 }
45481
45482 mutex_unlock(&inode->i_mutex);
45483 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45484 inode->i_pipe = NULL;
45485 }
45486
45487 -static struct vfsmount *pipe_mnt __read_mostly;
45488 +struct vfsmount *pipe_mnt __read_mostly;
45489
45490 /*
45491 * pipefs_dname() is called from d_path().
45492 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
45493 goto fail_iput;
45494 inode->i_pipe = pipe;
45495
45496 - pipe->readers = pipe->writers = 1;
45497 + atomic_set(&pipe->readers, 1);
45498 + atomic_set(&pipe->writers, 1);
45499 inode->i_fop = &rdwr_pipefifo_fops;
45500
45501 /*
45502 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
45503 index 15af622..0e9f4467 100644
45504 --- a/fs/proc/Kconfig
45505 +++ b/fs/proc/Kconfig
45506 @@ -30,12 +30,12 @@ config PROC_FS
45507
45508 config PROC_KCORE
45509 bool "/proc/kcore support" if !ARM
45510 - depends on PROC_FS && MMU
45511 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45512
45513 config PROC_VMCORE
45514 bool "/proc/vmcore support"
45515 - depends on PROC_FS && CRASH_DUMP
45516 - default y
45517 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45518 + default n
45519 help
45520 Exports the dump image of crashed kernel in ELF format.
45521
45522 @@ -59,8 +59,8 @@ config PROC_SYSCTL
45523 limited in memory.
45524
45525 config PROC_PAGE_MONITOR
45526 - default y
45527 - depends on PROC_FS && MMU
45528 + default n
45529 + depends on PROC_FS && MMU && !GRKERNSEC
45530 bool "Enable /proc page monitoring" if EXPERT
45531 help
45532 Various /proc files exist to monitor process memory utilization:
45533 diff --git a/fs/proc/array.c b/fs/proc/array.c
45534 index 3a1dafd..bf1bd84 100644
45535 --- a/fs/proc/array.c
45536 +++ b/fs/proc/array.c
45537 @@ -60,6 +60,7 @@
45538 #include <linux/tty.h>
45539 #include <linux/string.h>
45540 #include <linux/mman.h>
45541 +#include <linux/grsecurity.h>
45542 #include <linux/proc_fs.h>
45543 #include <linux/ioport.h>
45544 #include <linux/uaccess.h>
45545 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
45546 seq_putc(m, '\n');
45547 }
45548
45549 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45550 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
45551 +{
45552 + if (p->mm)
45553 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45554 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45555 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45556 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45557 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45558 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45559 + else
45560 + seq_printf(m, "PaX:\t-----\n");
45561 +}
45562 +#endif
45563 +
45564 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45565 struct pid *pid, struct task_struct *task)
45566 {
45567 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45568 task_cpus_allowed(m, task);
45569 cpuset_task_status_allowed(m, task);
45570 task_context_switch_counts(m, task);
45571 +
45572 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45573 + task_pax(m, task);
45574 +#endif
45575 +
45576 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45577 + task_grsec_rbac(m, task);
45578 +#endif
45579 +
45580 return 0;
45581 }
45582
45583 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45584 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45585 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45586 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45587 +#endif
45588 +
45589 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45590 struct pid *pid, struct task_struct *task, int whole)
45591 {
45592 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45593 char tcomm[sizeof(task->comm)];
45594 unsigned long flags;
45595
45596 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45597 + if (current->exec_id != m->exec_id) {
45598 + gr_log_badprocpid("stat");
45599 + return 0;
45600 + }
45601 +#endif
45602 +
45603 state = *get_task_state(task);
45604 vsize = eip = esp = 0;
45605 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45606 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45607 gtime = task->gtime;
45608 }
45609
45610 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45611 + if (PAX_RAND_FLAGS(mm)) {
45612 + eip = 0;
45613 + esp = 0;
45614 + wchan = 0;
45615 + }
45616 +#endif
45617 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45618 + wchan = 0;
45619 + eip =0;
45620 + esp =0;
45621 +#endif
45622 +
45623 /* scale priority and nice values from timeslices to -20..20 */
45624 /* to make it look like a "normal" Unix priority/nice value */
45625 priority = task_prio(task);
45626 @@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45627 vsize,
45628 mm ? get_mm_rss(mm) : 0,
45629 rsslim,
45630 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45631 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45632 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45633 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45634 +#else
45635 mm ? (permitted ? mm->start_code : 1) : 0,
45636 mm ? (permitted ? mm->end_code : 1) : 0,
45637 (permitted && mm) ? mm->start_stack : 0,
45638 +#endif
45639 esp,
45640 eip,
45641 /* The signal information here is obsolete.
45642 @@ -533,8 +590,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45643 struct pid *pid, struct task_struct *task)
45644 {
45645 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
45646 - struct mm_struct *mm = get_task_mm(task);
45647 + struct mm_struct *mm;
45648
45649 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45650 + if (current->exec_id != m->exec_id) {
45651 + gr_log_badprocpid("statm");
45652 + return 0;
45653 + }
45654 +#endif
45655 + mm = get_task_mm(task);
45656 if (mm) {
45657 size = task_statm(mm, &shared, &text, &data, &resident);
45658 mmput(mm);
45659 @@ -544,3 +608,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45660
45661 return 0;
45662 }
45663 +
45664 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45665 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45666 +{
45667 + u32 curr_ip = 0;
45668 + unsigned long flags;
45669 +
45670 + if (lock_task_sighand(task, &flags)) {
45671 + curr_ip = task->signal->curr_ip;
45672 + unlock_task_sighand(task, &flags);
45673 + }
45674 +
45675 + return sprintf(buffer, "%pI4\n", &curr_ip);
45676 +}
45677 +#endif
45678 diff --git a/fs/proc/base.c b/fs/proc/base.c
45679 index 1ace83d..f5e575d 100644
45680 --- a/fs/proc/base.c
45681 +++ b/fs/proc/base.c
45682 @@ -107,6 +107,22 @@ struct pid_entry {
45683 union proc_op op;
45684 };
45685
45686 +struct getdents_callback {
45687 + struct linux_dirent __user * current_dir;
45688 + struct linux_dirent __user * previous;
45689 + struct file * file;
45690 + int count;
45691 + int error;
45692 +};
45693 +
45694 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45695 + loff_t offset, u64 ino, unsigned int d_type)
45696 +{
45697 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
45698 + buf->error = -EINVAL;
45699 + return 0;
45700 +}
45701 +
45702 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45703 .name = (NAME), \
45704 .len = sizeof(NAME) - 1, \
45705 @@ -194,26 +210,6 @@ static int proc_root_link(struct inode *inode, struct path *path)
45706 return result;
45707 }
45708
45709 -static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
45710 -{
45711 - struct mm_struct *mm;
45712 - int err;
45713 -
45714 - err = mutex_lock_killable(&task->signal->cred_guard_mutex);
45715 - if (err)
45716 - return ERR_PTR(err);
45717 -
45718 - mm = get_task_mm(task);
45719 - if (mm && mm != current->mm &&
45720 - !ptrace_may_access(task, mode)) {
45721 - mmput(mm);
45722 - mm = ERR_PTR(-EACCES);
45723 - }
45724 - mutex_unlock(&task->signal->cred_guard_mutex);
45725 -
45726 - return mm;
45727 -}
45728 -
45729 struct mm_struct *mm_for_maps(struct task_struct *task)
45730 {
45731 return mm_access(task, PTRACE_MODE_READ);
45732 @@ -229,6 +225,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
45733 if (!mm->arg_end)
45734 goto out_mm; /* Shh! No looking before we're done */
45735
45736 + if (gr_acl_handle_procpidmem(task))
45737 + goto out_mm;
45738 +
45739 len = mm->arg_end - mm->arg_start;
45740
45741 if (len > PAGE_SIZE)
45742 @@ -256,12 +255,28 @@ out:
45743 return res;
45744 }
45745
45746 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45747 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45748 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45749 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45750 +#endif
45751 +
45752 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45753 {
45754 struct mm_struct *mm = mm_for_maps(task);
45755 int res = PTR_ERR(mm);
45756 if (mm && !IS_ERR(mm)) {
45757 unsigned int nwords = 0;
45758 +
45759 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45760 + /* allow if we're currently ptracing this task */
45761 + if (PAX_RAND_FLAGS(mm) &&
45762 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45763 + mmput(mm);
45764 + return 0;
45765 + }
45766 +#endif
45767 +
45768 do {
45769 nwords += 2;
45770 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45771 @@ -275,7 +290,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
45772 }
45773
45774
45775 -#ifdef CONFIG_KALLSYMS
45776 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45777 /*
45778 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45779 * Returns the resolved symbol. If that fails, simply return the address.
45780 @@ -314,7 +329,7 @@ static void unlock_trace(struct task_struct *task)
45781 mutex_unlock(&task->signal->cred_guard_mutex);
45782 }
45783
45784 -#ifdef CONFIG_STACKTRACE
45785 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45786
45787 #define MAX_STACK_TRACE_DEPTH 64
45788
45789 @@ -505,7 +520,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
45790 return count;
45791 }
45792
45793 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45794 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45795 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45796 {
45797 long nr;
45798 @@ -534,7 +549,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
45799 /************************************************************************/
45800
45801 /* permission checks */
45802 -static int proc_fd_access_allowed(struct inode *inode)
45803 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45804 {
45805 struct task_struct *task;
45806 int allowed = 0;
45807 @@ -544,7 +559,10 @@ static int proc_fd_access_allowed(struct inode *inode)
45808 */
45809 task = get_proc_task(inode);
45810 if (task) {
45811 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45812 + if (log)
45813 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45814 + else
45815 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45816 put_task_struct(task);
45817 }
45818 return allowed;
45819 @@ -786,6 +804,10 @@ static int mem_open(struct inode* inode, struct file* file)
45820 file->f_mode |= FMODE_UNSIGNED_OFFSET;
45821 file->private_data = mm;
45822
45823 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45824 + file->f_version = current->exec_id;
45825 +#endif
45826 +
45827 return 0;
45828 }
45829
45830 @@ -797,6 +819,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
45831 ssize_t copied;
45832 char *page;
45833
45834 +#ifdef CONFIG_GRKERNSEC
45835 + if (write)
45836 + return -EPERM;
45837 +#endif
45838 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45839 + if (file->f_version != current->exec_id) {
45840 + gr_log_badprocpid("mem");
45841 + return 0;
45842 + }
45843 +#endif
45844 +
45845 if (!mm)
45846 return 0;
45847
45848 @@ -897,6 +930,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
45849 if (!task)
45850 goto out_no_task;
45851
45852 + if (gr_acl_handle_procpidmem(task))
45853 + goto out;
45854 +
45855 ret = -ENOMEM;
45856 page = (char *)__get_free_page(GFP_TEMPORARY);
45857 if (!page)
45858 @@ -1519,7 +1555,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
45859 path_put(&nd->path);
45860
45861 /* Are we allowed to snoop on the tasks file descriptors? */
45862 - if (!proc_fd_access_allowed(inode))
45863 + if (!proc_fd_access_allowed(inode,0))
45864 goto out;
45865
45866 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45867 @@ -1558,8 +1594,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
45868 struct path path;
45869
45870 /* Are we allowed to snoop on the tasks file descriptors? */
45871 - if (!proc_fd_access_allowed(inode))
45872 - goto out;
45873 + /* logging this is needed for learning on chromium to work properly,
45874 + but we don't want to flood the logs from 'ps' which does a readlink
45875 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45876 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
45877 + */
45878 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45879 + if (!proc_fd_access_allowed(inode,0))
45880 + goto out;
45881 + } else {
45882 + if (!proc_fd_access_allowed(inode,1))
45883 + goto out;
45884 + }
45885
45886 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45887 if (error)
45888 @@ -1624,7 +1670,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
45889 rcu_read_lock();
45890 cred = __task_cred(task);
45891 inode->i_uid = cred->euid;
45892 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45893 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45894 +#else
45895 inode->i_gid = cred->egid;
45896 +#endif
45897 rcu_read_unlock();
45898 }
45899 security_task_to_inode(task, inode);
45900 @@ -1642,6 +1692,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45901 struct inode *inode = dentry->d_inode;
45902 struct task_struct *task;
45903 const struct cred *cred;
45904 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45905 + const struct cred *tmpcred = current_cred();
45906 +#endif
45907
45908 generic_fillattr(inode, stat);
45909
45910 @@ -1649,13 +1702,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45911 stat->uid = 0;
45912 stat->gid = 0;
45913 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45914 +
45915 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45916 + rcu_read_unlock();
45917 + return -ENOENT;
45918 + }
45919 +
45920 if (task) {
45921 + cred = __task_cred(task);
45922 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45923 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45924 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45925 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45926 +#endif
45927 + ) {
45928 +#endif
45929 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45930 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45931 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45932 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45933 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45934 +#endif
45935 task_dumpable(task)) {
45936 - cred = __task_cred(task);
45937 stat->uid = cred->euid;
45938 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45939 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45940 +#else
45941 stat->gid = cred->egid;
45942 +#endif
45943 }
45944 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45945 + } else {
45946 + rcu_read_unlock();
45947 + return -ENOENT;
45948 + }
45949 +#endif
45950 }
45951 rcu_read_unlock();
45952 return 0;
45953 @@ -1692,11 +1773,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
45954
45955 if (task) {
45956 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45957 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45958 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45959 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45960 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45961 +#endif
45962 task_dumpable(task)) {
45963 rcu_read_lock();
45964 cred = __task_cred(task);
45965 inode->i_uid = cred->euid;
45966 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45967 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45968 +#else
45969 inode->i_gid = cred->egid;
45970 +#endif
45971 rcu_read_unlock();
45972 } else {
45973 inode->i_uid = 0;
45974 @@ -1814,7 +1904,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
45975 int fd = proc_fd(inode);
45976
45977 if (task) {
45978 - files = get_files_struct(task);
45979 + if (!gr_acl_handle_procpidmem(task))
45980 + files = get_files_struct(task);
45981 put_task_struct(task);
45982 }
45983 if (files) {
45984 @@ -2082,11 +2173,21 @@ static const struct file_operations proc_fd_operations = {
45985 */
45986 static int proc_fd_permission(struct inode *inode, int mask)
45987 {
45988 + struct task_struct *task;
45989 int rv = generic_permission(inode, mask);
45990 - if (rv == 0)
45991 - return 0;
45992 +
45993 if (task_pid(current) == proc_pid(inode))
45994 rv = 0;
45995 +
45996 + task = get_proc_task(inode);
45997 + if (task == NULL)
45998 + return rv;
45999 +
46000 + if (gr_acl_handle_procpidmem(task))
46001 + rv = -EACCES;
46002 +
46003 + put_task_struct(task);
46004 +
46005 return rv;
46006 }
46007
46008 @@ -2196,6 +2297,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
46009 if (!task)
46010 goto out_no_task;
46011
46012 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46013 + goto out;
46014 +
46015 /*
46016 * Yes, it does not scale. And it should not. Don't add
46017 * new entries into /proc/<tgid>/ without very good reasons.
46018 @@ -2240,6 +2344,9 @@ static int proc_pident_readdir(struct file *filp,
46019 if (!task)
46020 goto out_no_task;
46021
46022 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46023 + goto out;
46024 +
46025 ret = 0;
46026 i = filp->f_pos;
46027 switch (i) {
46028 @@ -2510,7 +2617,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
46029 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
46030 void *cookie)
46031 {
46032 - char *s = nd_get_link(nd);
46033 + const char *s = nd_get_link(nd);
46034 if (!IS_ERR(s))
46035 __putname(s);
46036 }
46037 @@ -2708,7 +2815,7 @@ static const struct pid_entry tgid_base_stuff[] = {
46038 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
46039 #endif
46040 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46041 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46042 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46043 INF("syscall", S_IRUGO, proc_pid_syscall),
46044 #endif
46045 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46046 @@ -2733,10 +2840,10 @@ static const struct pid_entry tgid_base_stuff[] = {
46047 #ifdef CONFIG_SECURITY
46048 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46049 #endif
46050 -#ifdef CONFIG_KALLSYMS
46051 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46052 INF("wchan", S_IRUGO, proc_pid_wchan),
46053 #endif
46054 -#ifdef CONFIG_STACKTRACE
46055 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46056 ONE("stack", S_IRUGO, proc_pid_stack),
46057 #endif
46058 #ifdef CONFIG_SCHEDSTATS
46059 @@ -2770,6 +2877,9 @@ static const struct pid_entry tgid_base_stuff[] = {
46060 #ifdef CONFIG_HARDWALL
46061 INF("hardwall", S_IRUGO, proc_pid_hardwall),
46062 #endif
46063 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46064 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
46065 +#endif
46066 };
46067
46068 static int proc_tgid_base_readdir(struct file * filp,
46069 @@ -2895,7 +3005,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
46070 if (!inode)
46071 goto out;
46072
46073 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46074 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
46075 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46076 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46077 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
46078 +#else
46079 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
46080 +#endif
46081 inode->i_op = &proc_tgid_base_inode_operations;
46082 inode->i_fop = &proc_tgid_base_operations;
46083 inode->i_flags|=S_IMMUTABLE;
46084 @@ -2937,7 +3054,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
46085 if (!task)
46086 goto out;
46087
46088 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46089 + goto out_put_task;
46090 +
46091 result = proc_pid_instantiate(dir, dentry, task, NULL);
46092 +out_put_task:
46093 put_task_struct(task);
46094 out:
46095 return result;
46096 @@ -3002,6 +3123,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
46097 {
46098 unsigned int nr;
46099 struct task_struct *reaper;
46100 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46101 + const struct cred *tmpcred = current_cred();
46102 + const struct cred *itercred;
46103 +#endif
46104 + filldir_t __filldir = filldir;
46105 struct tgid_iter iter;
46106 struct pid_namespace *ns;
46107
46108 @@ -3025,8 +3151,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
46109 for (iter = next_tgid(ns, iter);
46110 iter.task;
46111 iter.tgid += 1, iter = next_tgid(ns, iter)) {
46112 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46113 + rcu_read_lock();
46114 + itercred = __task_cred(iter.task);
46115 +#endif
46116 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
46117 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46118 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
46119 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46120 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
46121 +#endif
46122 + )
46123 +#endif
46124 + )
46125 + __filldir = &gr_fake_filldir;
46126 + else
46127 + __filldir = filldir;
46128 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46129 + rcu_read_unlock();
46130 +#endif
46131 filp->f_pos = iter.tgid + TGID_OFFSET;
46132 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
46133 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
46134 put_task_struct(iter.task);
46135 goto out;
46136 }
46137 @@ -3054,7 +3199,7 @@ static const struct pid_entry tid_base_stuff[] = {
46138 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
46139 #endif
46140 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46141 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46142 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46143 INF("syscall", S_IRUGO, proc_pid_syscall),
46144 #endif
46145 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46146 @@ -3078,10 +3223,10 @@ static const struct pid_entry tid_base_stuff[] = {
46147 #ifdef CONFIG_SECURITY
46148 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46149 #endif
46150 -#ifdef CONFIG_KALLSYMS
46151 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46152 INF("wchan", S_IRUGO, proc_pid_wchan),
46153 #endif
46154 -#ifdef CONFIG_STACKTRACE
46155 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46156 ONE("stack", S_IRUGO, proc_pid_stack),
46157 #endif
46158 #ifdef CONFIG_SCHEDSTATS
46159 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
46160 index 82676e3..5f8518a 100644
46161 --- a/fs/proc/cmdline.c
46162 +++ b/fs/proc/cmdline.c
46163 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
46164
46165 static int __init proc_cmdline_init(void)
46166 {
46167 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46168 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
46169 +#else
46170 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
46171 +#endif
46172 return 0;
46173 }
46174 module_init(proc_cmdline_init);
46175 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
46176 index b143471..bb105e5 100644
46177 --- a/fs/proc/devices.c
46178 +++ b/fs/proc/devices.c
46179 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
46180
46181 static int __init proc_devices_init(void)
46182 {
46183 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46184 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
46185 +#else
46186 proc_create("devices", 0, NULL, &proc_devinfo_operations);
46187 +#endif
46188 return 0;
46189 }
46190 module_init(proc_devices_init);
46191 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
46192 index 7737c54..7172574 100644
46193 --- a/fs/proc/inode.c
46194 +++ b/fs/proc/inode.c
46195 @@ -18,12 +18,18 @@
46196 #include <linux/module.h>
46197 #include <linux/sysctl.h>
46198 #include <linux/slab.h>
46199 +#include <linux/grsecurity.h>
46200
46201 #include <asm/system.h>
46202 #include <asm/uaccess.h>
46203
46204 #include "internal.h"
46205
46206 +#ifdef CONFIG_PROC_SYSCTL
46207 +extern const struct inode_operations proc_sys_inode_operations;
46208 +extern const struct inode_operations proc_sys_dir_operations;
46209 +#endif
46210 +
46211 static void proc_evict_inode(struct inode *inode)
46212 {
46213 struct proc_dir_entry *de;
46214 @@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
46215 ns_ops = PROC_I(inode)->ns_ops;
46216 if (ns_ops && ns_ops->put)
46217 ns_ops->put(PROC_I(inode)->ns);
46218 +
46219 +#ifdef CONFIG_PROC_SYSCTL
46220 + if (inode->i_op == &proc_sys_inode_operations ||
46221 + inode->i_op == &proc_sys_dir_operations)
46222 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
46223 +#endif
46224 +
46225 }
46226
46227 static struct kmem_cache * proc_inode_cachep;
46228 @@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
46229 if (de->mode) {
46230 inode->i_mode = de->mode;
46231 inode->i_uid = de->uid;
46232 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46233 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46234 +#else
46235 inode->i_gid = de->gid;
46236 +#endif
46237 }
46238 if (de->size)
46239 inode->i_size = de->size;
46240 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
46241 index 7838e5c..ff92cbc 100644
46242 --- a/fs/proc/internal.h
46243 +++ b/fs/proc/internal.h
46244 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46245 struct pid *pid, struct task_struct *task);
46246 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46247 struct pid *pid, struct task_struct *task);
46248 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46249 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
46250 +#endif
46251 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
46252
46253 extern const struct file_operations proc_maps_operations;
46254 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
46255 index d245cb2..f4e8498 100644
46256 --- a/fs/proc/kcore.c
46257 +++ b/fs/proc/kcore.c
46258 @@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46259 * the addresses in the elf_phdr on our list.
46260 */
46261 start = kc_offset_to_vaddr(*fpos - elf_buflen);
46262 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
46263 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
46264 + if (tsz > buflen)
46265 tsz = buflen;
46266 -
46267 +
46268 while (buflen) {
46269 struct kcore_list *m;
46270
46271 @@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46272 kfree(elf_buf);
46273 } else {
46274 if (kern_addr_valid(start)) {
46275 - unsigned long n;
46276 + char *elf_buf;
46277 + mm_segment_t oldfs;
46278
46279 - n = copy_to_user(buffer, (char *)start, tsz);
46280 - /*
46281 - * We cannot distingush between fault on source
46282 - * and fault on destination. When this happens
46283 - * we clear too and hope it will trigger the
46284 - * EFAULT again.
46285 - */
46286 - if (n) {
46287 - if (clear_user(buffer + tsz - n,
46288 - n))
46289 + elf_buf = kmalloc(tsz, GFP_KERNEL);
46290 + if (!elf_buf)
46291 + return -ENOMEM;
46292 + oldfs = get_fs();
46293 + set_fs(KERNEL_DS);
46294 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46295 + set_fs(oldfs);
46296 + if (copy_to_user(buffer, elf_buf, tsz)) {
46297 + kfree(elf_buf);
46298 return -EFAULT;
46299 + }
46300 }
46301 + set_fs(oldfs);
46302 + kfree(elf_buf);
46303 } else {
46304 if (clear_user(buffer, tsz))
46305 return -EFAULT;
46306 @@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46307
46308 static int open_kcore(struct inode *inode, struct file *filp)
46309 {
46310 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46311 + return -EPERM;
46312 +#endif
46313 if (!capable(CAP_SYS_RAWIO))
46314 return -EPERM;
46315 if (kcore_need_update)
46316 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
46317 index 80e4645..53e5fcf 100644
46318 --- a/fs/proc/meminfo.c
46319 +++ b/fs/proc/meminfo.c
46320 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
46321 vmi.used >> 10,
46322 vmi.largest_chunk >> 10
46323 #ifdef CONFIG_MEMORY_FAILURE
46324 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46325 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46326 #endif
46327 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46328 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46329 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
46330 index b1822dd..df622cb 100644
46331 --- a/fs/proc/nommu.c
46332 +++ b/fs/proc/nommu.c
46333 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
46334 if (len < 1)
46335 len = 1;
46336 seq_printf(m, "%*c", len, ' ');
46337 - seq_path(m, &file->f_path, "");
46338 + seq_path(m, &file->f_path, "\n\\");
46339 }
46340
46341 seq_putc(m, '\n');
46342 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
46343 index f738024..876984a 100644
46344 --- a/fs/proc/proc_net.c
46345 +++ b/fs/proc/proc_net.c
46346 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
46347 struct task_struct *task;
46348 struct nsproxy *ns;
46349 struct net *net = NULL;
46350 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46351 + const struct cred *cred = current_cred();
46352 +#endif
46353 +
46354 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46355 + if (cred->fsuid)
46356 + return net;
46357 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46358 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46359 + return net;
46360 +#endif
46361
46362 rcu_read_lock();
46363 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46364 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
46365 index a6b6217..1e0579d 100644
46366 --- a/fs/proc/proc_sysctl.c
46367 +++ b/fs/proc/proc_sysctl.c
46368 @@ -9,11 +9,13 @@
46369 #include <linux/namei.h>
46370 #include "internal.h"
46371
46372 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46373 +
46374 static const struct dentry_operations proc_sys_dentry_operations;
46375 static const struct file_operations proc_sys_file_operations;
46376 -static const struct inode_operations proc_sys_inode_operations;
46377 +const struct inode_operations proc_sys_inode_operations;
46378 static const struct file_operations proc_sys_dir_file_operations;
46379 -static const struct inode_operations proc_sys_dir_operations;
46380 +const struct inode_operations proc_sys_dir_operations;
46381
46382 void proc_sys_poll_notify(struct ctl_table_poll *poll)
46383 {
46384 @@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
46385
46386 err = NULL;
46387 d_set_d_op(dentry, &proc_sys_dentry_operations);
46388 +
46389 + gr_handle_proc_create(dentry, inode);
46390 +
46391 d_add(dentry, inode);
46392
46393 + if (gr_handle_sysctl(p, MAY_EXEC))
46394 + err = ERR_PTR(-ENOENT);
46395 +
46396 out:
46397 sysctl_head_finish(head);
46398 return err;
46399 @@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
46400 if (!table->proc_handler)
46401 goto out;
46402
46403 +#ifdef CONFIG_GRKERNSEC
46404 + error = -EPERM;
46405 + if (write && !capable(CAP_SYS_ADMIN))
46406 + goto out;
46407 +#endif
46408 +
46409 /* careful: calling conventions are nasty here */
46410 res = count;
46411 error = table->proc_handler(table, write, buf, &res, ppos);
46412 @@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
46413 return -ENOMEM;
46414 } else {
46415 d_set_d_op(child, &proc_sys_dentry_operations);
46416 +
46417 + gr_handle_proc_create(child, inode);
46418 +
46419 d_add(child, inode);
46420 }
46421 } else {
46422 @@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
46423 if (*pos < file->f_pos)
46424 continue;
46425
46426 + if (gr_handle_sysctl(table, 0))
46427 + continue;
46428 +
46429 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46430 if (res)
46431 return res;
46432 @@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
46433 if (IS_ERR(head))
46434 return PTR_ERR(head);
46435
46436 + if (table && gr_handle_sysctl(table, MAY_EXEC))
46437 + return -ENOENT;
46438 +
46439 generic_fillattr(inode, stat);
46440 if (table)
46441 stat->mode = (stat->mode & S_IFMT) | table->mode;
46442 @@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
46443 .llseek = generic_file_llseek,
46444 };
46445
46446 -static const struct inode_operations proc_sys_inode_operations = {
46447 +const struct inode_operations proc_sys_inode_operations = {
46448 .permission = proc_sys_permission,
46449 .setattr = proc_sys_setattr,
46450 .getattr = proc_sys_getattr,
46451 };
46452
46453 -static const struct inode_operations proc_sys_dir_operations = {
46454 +const struct inode_operations proc_sys_dir_operations = {
46455 .lookup = proc_sys_lookup,
46456 .permission = proc_sys_permission,
46457 .setattr = proc_sys_setattr,
46458 diff --git a/fs/proc/root.c b/fs/proc/root.c
46459 index 03102d9..4ae347e 100644
46460 --- a/fs/proc/root.c
46461 +++ b/fs/proc/root.c
46462 @@ -121,7 +121,15 @@ void __init proc_root_init(void)
46463 #ifdef CONFIG_PROC_DEVICETREE
46464 proc_device_tree_init();
46465 #endif
46466 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46467 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46468 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46469 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46470 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46471 +#endif
46472 +#else
46473 proc_mkdir("bus", NULL);
46474 +#endif
46475 proc_sys_init();
46476 }
46477
46478 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
46479 index 7dcd2a2..b2f410e 100644
46480 --- a/fs/proc/task_mmu.c
46481 +++ b/fs/proc/task_mmu.c
46482 @@ -11,6 +11,7 @@
46483 #include <linux/rmap.h>
46484 #include <linux/swap.h>
46485 #include <linux/swapops.h>
46486 +#include <linux/grsecurity.h>
46487
46488 #include <asm/elf.h>
46489 #include <asm/uaccess.h>
46490 @@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46491 "VmExe:\t%8lu kB\n"
46492 "VmLib:\t%8lu kB\n"
46493 "VmPTE:\t%8lu kB\n"
46494 - "VmSwap:\t%8lu kB\n",
46495 - hiwater_vm << (PAGE_SHIFT-10),
46496 + "VmSwap:\t%8lu kB\n"
46497 +
46498 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46499 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46500 +#endif
46501 +
46502 + ,hiwater_vm << (PAGE_SHIFT-10),
46503 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46504 mm->locked_vm << (PAGE_SHIFT-10),
46505 mm->pinned_vm << (PAGE_SHIFT-10),
46506 @@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46507 data << (PAGE_SHIFT-10),
46508 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46509 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46510 - swap << (PAGE_SHIFT-10));
46511 + swap << (PAGE_SHIFT-10)
46512 +
46513 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46514 + , mm->context.user_cs_base, mm->context.user_cs_limit
46515 +#endif
46516 +
46517 + );
46518 }
46519
46520 unsigned long task_vsize(struct mm_struct *mm)
46521 @@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
46522 return ret;
46523 }
46524
46525 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46526 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46527 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
46528 + _mm->pax_flags & MF_PAX_SEGMEXEC))
46529 +#endif
46530 +
46531 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46532 {
46533 struct mm_struct *mm = vma->vm_mm;
46534 @@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46535 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46536 }
46537
46538 - /* We don't show the stack guard page in /proc/maps */
46539 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46540 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46541 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46542 +#else
46543 start = vma->vm_start;
46544 - if (stack_guard_page_start(vma, start))
46545 - start += PAGE_SIZE;
46546 end = vma->vm_end;
46547 - if (stack_guard_page_end(vma, end))
46548 - end -= PAGE_SIZE;
46549 +#endif
46550
46551 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46552 start,
46553 @@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46554 flags & VM_WRITE ? 'w' : '-',
46555 flags & VM_EXEC ? 'x' : '-',
46556 flags & VM_MAYSHARE ? 's' : 'p',
46557 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46558 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46559 +#else
46560 pgoff,
46561 +#endif
46562 MAJOR(dev), MINOR(dev), ino, &len);
46563
46564 /*
46565 @@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46566 */
46567 if (file) {
46568 pad_len_spaces(m, len);
46569 - seq_path(m, &file->f_path, "\n");
46570 + seq_path(m, &file->f_path, "\n\\");
46571 } else {
46572 const char *name = arch_vma_name(vma);
46573 if (!name) {
46574 @@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46575 if (vma->vm_start <= mm->brk &&
46576 vma->vm_end >= mm->start_brk) {
46577 name = "[heap]";
46578 - } else if (vma->vm_start <= mm->start_stack &&
46579 - vma->vm_end >= mm->start_stack) {
46580 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46581 + (vma->vm_start <= mm->start_stack &&
46582 + vma->vm_end >= mm->start_stack)) {
46583 name = "[stack]";
46584 }
46585 } else {
46586 @@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
46587 struct proc_maps_private *priv = m->private;
46588 struct task_struct *task = priv->task;
46589
46590 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46591 + if (current->exec_id != m->exec_id) {
46592 + gr_log_badprocpid("maps");
46593 + return 0;
46594 + }
46595 +#endif
46596 +
46597 show_map_vma(m, vma);
46598
46599 if (m->count < m->size) /* vma is copied successfully */
46600 @@ -434,12 +464,23 @@ static int show_smap(struct seq_file *m, void *v)
46601 .private = &mss,
46602 };
46603
46604 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46605 + if (current->exec_id != m->exec_id) {
46606 + gr_log_badprocpid("smaps");
46607 + return 0;
46608 + }
46609 +#endif
46610 memset(&mss, 0, sizeof mss);
46611 - mss.vma = vma;
46612 - /* mmap_sem is held in m_start */
46613 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46614 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46615 -
46616 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46617 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46618 +#endif
46619 + mss.vma = vma;
46620 + /* mmap_sem is held in m_start */
46621 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46622 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46623 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46624 + }
46625 +#endif
46626 show_map_vma(m, vma);
46627
46628 seq_printf(m,
46629 @@ -457,7 +498,11 @@ static int show_smap(struct seq_file *m, void *v)
46630 "KernelPageSize: %8lu kB\n"
46631 "MMUPageSize: %8lu kB\n"
46632 "Locked: %8lu kB\n",
46633 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46634 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46635 +#else
46636 (vma->vm_end - vma->vm_start) >> 10,
46637 +#endif
46638 mss.resident >> 10,
46639 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46640 mss.shared_clean >> 10,
46641 @@ -1015,6 +1060,13 @@ static int show_numa_map(struct seq_file *m, void *v)
46642 int n;
46643 char buffer[50];
46644
46645 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46646 + if (current->exec_id != m->exec_id) {
46647 + gr_log_badprocpid("numa_maps");
46648 + return 0;
46649 + }
46650 +#endif
46651 +
46652 if (!mm)
46653 return 0;
46654
46655 @@ -1032,11 +1084,15 @@ static int show_numa_map(struct seq_file *m, void *v)
46656 mpol_to_str(buffer, sizeof(buffer), pol, 0);
46657 mpol_cond_put(pol);
46658
46659 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46660 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
46661 +#else
46662 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
46663 +#endif
46664
46665 if (file) {
46666 seq_printf(m, " file=");
46667 - seq_path(m, &file->f_path, "\n\t= ");
46668 + seq_path(m, &file->f_path, "\n\t\\= ");
46669 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46670 seq_printf(m, " heap");
46671 } else if (vma->vm_start <= mm->start_stack &&
46672 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
46673 index 980de54..2a4db5f 100644
46674 --- a/fs/proc/task_nommu.c
46675 +++ b/fs/proc/task_nommu.c
46676 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46677 else
46678 bytes += kobjsize(mm);
46679
46680 - if (current->fs && current->fs->users > 1)
46681 + if (current->fs && atomic_read(&current->fs->users) > 1)
46682 sbytes += kobjsize(current->fs);
46683 else
46684 bytes += kobjsize(current->fs);
46685 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
46686
46687 if (file) {
46688 pad_len_spaces(m, len);
46689 - seq_path(m, &file->f_path, "");
46690 + seq_path(m, &file->f_path, "\n\\");
46691 } else if (mm) {
46692 if (vma->vm_start <= mm->start_stack &&
46693 vma->vm_end >= mm->start_stack) {
46694 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
46695 index d67908b..d13f6a6 100644
46696 --- a/fs/quota/netlink.c
46697 +++ b/fs/quota/netlink.c
46698 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
46699 void quota_send_warning(short type, unsigned int id, dev_t dev,
46700 const char warntype)
46701 {
46702 - static atomic_t seq;
46703 + static atomic_unchecked_t seq;
46704 struct sk_buff *skb;
46705 void *msg_head;
46706 int ret;
46707 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
46708 "VFS: Not enough memory to send quota warning.\n");
46709 return;
46710 }
46711 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46712 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46713 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46714 if (!msg_head) {
46715 printk(KERN_ERR
46716 diff --git a/fs/readdir.c b/fs/readdir.c
46717 index 356f715..c918d38 100644
46718 --- a/fs/readdir.c
46719 +++ b/fs/readdir.c
46720 @@ -17,6 +17,7 @@
46721 #include <linux/security.h>
46722 #include <linux/syscalls.h>
46723 #include <linux/unistd.h>
46724 +#include <linux/namei.h>
46725
46726 #include <asm/uaccess.h>
46727
46728 @@ -67,6 +68,7 @@ struct old_linux_dirent {
46729
46730 struct readdir_callback {
46731 struct old_linux_dirent __user * dirent;
46732 + struct file * file;
46733 int result;
46734 };
46735
46736 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
46737 buf->result = -EOVERFLOW;
46738 return -EOVERFLOW;
46739 }
46740 +
46741 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46742 + return 0;
46743 +
46744 buf->result++;
46745 dirent = buf->dirent;
46746 if (!access_ok(VERIFY_WRITE, dirent,
46747 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
46748
46749 buf.result = 0;
46750 buf.dirent = dirent;
46751 + buf.file = file;
46752
46753 error = vfs_readdir(file, fillonedir, &buf);
46754 if (buf.result)
46755 @@ -142,6 +149,7 @@ struct linux_dirent {
46756 struct getdents_callback {
46757 struct linux_dirent __user * current_dir;
46758 struct linux_dirent __user * previous;
46759 + struct file * file;
46760 int count;
46761 int error;
46762 };
46763 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
46764 buf->error = -EOVERFLOW;
46765 return -EOVERFLOW;
46766 }
46767 +
46768 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46769 + return 0;
46770 +
46771 dirent = buf->previous;
46772 if (dirent) {
46773 if (__put_user(offset, &dirent->d_off))
46774 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
46775 buf.previous = NULL;
46776 buf.count = count;
46777 buf.error = 0;
46778 + buf.file = file;
46779
46780 error = vfs_readdir(file, filldir, &buf);
46781 if (error >= 0)
46782 @@ -229,6 +242,7 @@ out:
46783 struct getdents_callback64 {
46784 struct linux_dirent64 __user * current_dir;
46785 struct linux_dirent64 __user * previous;
46786 + struct file *file;
46787 int count;
46788 int error;
46789 };
46790 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
46791 buf->error = -EINVAL; /* only used if we fail.. */
46792 if (reclen > buf->count)
46793 return -EINVAL;
46794 +
46795 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46796 + return 0;
46797 +
46798 dirent = buf->previous;
46799 if (dirent) {
46800 if (__put_user(offset, &dirent->d_off))
46801 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46802
46803 buf.current_dir = dirent;
46804 buf.previous = NULL;
46805 + buf.file = file;
46806 buf.count = count;
46807 buf.error = 0;
46808
46809 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46810 error = buf.error;
46811 lastdirent = buf.previous;
46812 if (lastdirent) {
46813 - typeof(lastdirent->d_off) d_off = file->f_pos;
46814 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46815 if (__put_user(d_off, &lastdirent->d_off))
46816 error = -EFAULT;
46817 else
46818 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
46819 index 60c0804..d814f98 100644
46820 --- a/fs/reiserfs/do_balan.c
46821 +++ b/fs/reiserfs/do_balan.c
46822 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
46823 return;
46824 }
46825
46826 - atomic_inc(&(fs_generation(tb->tb_sb)));
46827 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46828 do_balance_starts(tb);
46829
46830 /* balance leaf returns 0 except if combining L R and S into
46831 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
46832 index 7a99811..a7c96c4 100644
46833 --- a/fs/reiserfs/procfs.c
46834 +++ b/fs/reiserfs/procfs.c
46835 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
46836 "SMALL_TAILS " : "NO_TAILS ",
46837 replay_only(sb) ? "REPLAY_ONLY " : "",
46838 convert_reiserfs(sb) ? "CONV " : "",
46839 - atomic_read(&r->s_generation_counter),
46840 + atomic_read_unchecked(&r->s_generation_counter),
46841 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46842 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46843 SF(s_good_search_by_key_reada), SF(s_bmaps),
46844 diff --git a/fs/select.c b/fs/select.c
46845 index d33418f..2a5345e 100644
46846 --- a/fs/select.c
46847 +++ b/fs/select.c
46848 @@ -20,6 +20,7 @@
46849 #include <linux/module.h>
46850 #include <linux/slab.h>
46851 #include <linux/poll.h>
46852 +#include <linux/security.h>
46853 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46854 #include <linux/file.h>
46855 #include <linux/fdtable.h>
46856 @@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
46857 struct poll_list *walk = head;
46858 unsigned long todo = nfds;
46859
46860 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46861 if (nfds > rlimit(RLIMIT_NOFILE))
46862 return -EINVAL;
46863
46864 diff --git a/fs/seq_file.c b/fs/seq_file.c
46865 index dba43c3..9fb8511 100644
46866 --- a/fs/seq_file.c
46867 +++ b/fs/seq_file.c
46868 @@ -9,6 +9,7 @@
46869 #include <linux/module.h>
46870 #include <linux/seq_file.h>
46871 #include <linux/slab.h>
46872 +#include <linux/sched.h>
46873
46874 #include <asm/uaccess.h>
46875 #include <asm/page.h>
46876 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
46877 memset(p, 0, sizeof(*p));
46878 mutex_init(&p->lock);
46879 p->op = op;
46880 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46881 + p->exec_id = current->exec_id;
46882 +#endif
46883
46884 /*
46885 * Wrappers around seq_open(e.g. swaps_open) need to be
46886 @@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46887 return 0;
46888 }
46889 if (!m->buf) {
46890 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46891 + m->size = PAGE_SIZE;
46892 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46893 if (!m->buf)
46894 return -ENOMEM;
46895 }
46896 @@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46897 Eoverflow:
46898 m->op->stop(m, p);
46899 kfree(m->buf);
46900 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46901 + m->size <<= 1;
46902 + m->buf = kmalloc(m->size, GFP_KERNEL);
46903 return !m->buf ? -ENOMEM : -EAGAIN;
46904 }
46905
46906 @@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46907 m->version = file->f_version;
46908 /* grab buffer if we didn't have one */
46909 if (!m->buf) {
46910 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46911 + m->size = PAGE_SIZE;
46912 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46913 if (!m->buf)
46914 goto Enomem;
46915 }
46916 @@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46917 goto Fill;
46918 m->op->stop(m, p);
46919 kfree(m->buf);
46920 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46921 + m->size <<= 1;
46922 + m->buf = kmalloc(m->size, GFP_KERNEL);
46923 if (!m->buf)
46924 goto Enomem;
46925 m->count = 0;
46926 @@ -549,7 +557,7 @@ static void single_stop(struct seq_file *p, void *v)
46927 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46928 void *data)
46929 {
46930 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46931 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46932 int res = -ENOMEM;
46933
46934 if (op) {
46935 diff --git a/fs/splice.c b/fs/splice.c
46936 index fa2defa..8601650 100644
46937 --- a/fs/splice.c
46938 +++ b/fs/splice.c
46939 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46940 pipe_lock(pipe);
46941
46942 for (;;) {
46943 - if (!pipe->readers) {
46944 + if (!atomic_read(&pipe->readers)) {
46945 send_sig(SIGPIPE, current, 0);
46946 if (!ret)
46947 ret = -EPIPE;
46948 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46949 do_wakeup = 0;
46950 }
46951
46952 - pipe->waiting_writers++;
46953 + atomic_inc(&pipe->waiting_writers);
46954 pipe_wait(pipe);
46955 - pipe->waiting_writers--;
46956 + atomic_dec(&pipe->waiting_writers);
46957 }
46958
46959 pipe_unlock(pipe);
46960 @@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
46961 old_fs = get_fs();
46962 set_fs(get_ds());
46963 /* The cast to a user pointer is valid due to the set_fs() */
46964 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46965 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46966 set_fs(old_fs);
46967
46968 return res;
46969 @@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
46970 old_fs = get_fs();
46971 set_fs(get_ds());
46972 /* The cast to a user pointer is valid due to the set_fs() */
46973 - res = vfs_write(file, (const char __user *)buf, count, &pos);
46974 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46975 set_fs(old_fs);
46976
46977 return res;
46978 @@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
46979 goto err;
46980
46981 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46982 - vec[i].iov_base = (void __user *) page_address(page);
46983 + vec[i].iov_base = (void __force_user *) page_address(page);
46984 vec[i].iov_len = this_len;
46985 spd.pages[i] = page;
46986 spd.nr_pages++;
46987 @@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46988 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46989 {
46990 while (!pipe->nrbufs) {
46991 - if (!pipe->writers)
46992 + if (!atomic_read(&pipe->writers))
46993 return 0;
46994
46995 - if (!pipe->waiting_writers && sd->num_spliced)
46996 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46997 return 0;
46998
46999 if (sd->flags & SPLICE_F_NONBLOCK)
47000 @@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
47001 * out of the pipe right after the splice_to_pipe(). So set
47002 * PIPE_READERS appropriately.
47003 */
47004 - pipe->readers = 1;
47005 + atomic_set(&pipe->readers, 1);
47006
47007 current->splice_pipe = pipe;
47008 }
47009 @@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
47010 ret = -ERESTARTSYS;
47011 break;
47012 }
47013 - if (!pipe->writers)
47014 + if (!atomic_read(&pipe->writers))
47015 break;
47016 - if (!pipe->waiting_writers) {
47017 + if (!atomic_read(&pipe->waiting_writers)) {
47018 if (flags & SPLICE_F_NONBLOCK) {
47019 ret = -EAGAIN;
47020 break;
47021 @@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
47022 pipe_lock(pipe);
47023
47024 while (pipe->nrbufs >= pipe->buffers) {
47025 - if (!pipe->readers) {
47026 + if (!atomic_read(&pipe->readers)) {
47027 send_sig(SIGPIPE, current, 0);
47028 ret = -EPIPE;
47029 break;
47030 @@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
47031 ret = -ERESTARTSYS;
47032 break;
47033 }
47034 - pipe->waiting_writers++;
47035 + atomic_inc(&pipe->waiting_writers);
47036 pipe_wait(pipe);
47037 - pipe->waiting_writers--;
47038 + atomic_dec(&pipe->waiting_writers);
47039 }
47040
47041 pipe_unlock(pipe);
47042 @@ -1819,14 +1819,14 @@ retry:
47043 pipe_double_lock(ipipe, opipe);
47044
47045 do {
47046 - if (!opipe->readers) {
47047 + if (!atomic_read(&opipe->readers)) {
47048 send_sig(SIGPIPE, current, 0);
47049 if (!ret)
47050 ret = -EPIPE;
47051 break;
47052 }
47053
47054 - if (!ipipe->nrbufs && !ipipe->writers)
47055 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
47056 break;
47057
47058 /*
47059 @@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
47060 pipe_double_lock(ipipe, opipe);
47061
47062 do {
47063 - if (!opipe->readers) {
47064 + if (!atomic_read(&opipe->readers)) {
47065 send_sig(SIGPIPE, current, 0);
47066 if (!ret)
47067 ret = -EPIPE;
47068 @@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
47069 * return EAGAIN if we have the potential of some data in the
47070 * future, otherwise just return 0
47071 */
47072 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
47073 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
47074 ret = -EAGAIN;
47075
47076 pipe_unlock(ipipe);
47077 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
47078 index 7fdf6a7..e6cd8ad 100644
47079 --- a/fs/sysfs/dir.c
47080 +++ b/fs/sysfs/dir.c
47081 @@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
47082 struct sysfs_dirent *sd;
47083 int rc;
47084
47085 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
47086 + const char *parent_name = parent_sd->s_name;
47087 +
47088 + mode = S_IFDIR | S_IRWXU;
47089 +
47090 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
47091 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
47092 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
47093 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
47094 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
47095 +#endif
47096 +
47097 /* allocate */
47098 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
47099 if (!sd)
47100 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
47101 index 779789a..f58193c 100644
47102 --- a/fs/sysfs/file.c
47103 +++ b/fs/sysfs/file.c
47104 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
47105
47106 struct sysfs_open_dirent {
47107 atomic_t refcnt;
47108 - atomic_t event;
47109 + atomic_unchecked_t event;
47110 wait_queue_head_t poll;
47111 struct list_head buffers; /* goes through sysfs_buffer.list */
47112 };
47113 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
47114 if (!sysfs_get_active(attr_sd))
47115 return -ENODEV;
47116
47117 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
47118 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
47119 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
47120
47121 sysfs_put_active(attr_sd);
47122 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
47123 return -ENOMEM;
47124
47125 atomic_set(&new_od->refcnt, 0);
47126 - atomic_set(&new_od->event, 1);
47127 + atomic_set_unchecked(&new_od->event, 1);
47128 init_waitqueue_head(&new_od->poll);
47129 INIT_LIST_HEAD(&new_od->buffers);
47130 goto retry;
47131 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
47132
47133 sysfs_put_active(attr_sd);
47134
47135 - if (buffer->event != atomic_read(&od->event))
47136 + if (buffer->event != atomic_read_unchecked(&od->event))
47137 goto trigger;
47138
47139 return DEFAULT_POLLMASK;
47140 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
47141
47142 od = sd->s_attr.open;
47143 if (od) {
47144 - atomic_inc(&od->event);
47145 + atomic_inc_unchecked(&od->event);
47146 wake_up_interruptible(&od->poll);
47147 }
47148
47149 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
47150 index a7ac78f..02158e1 100644
47151 --- a/fs/sysfs/symlink.c
47152 +++ b/fs/sysfs/symlink.c
47153 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
47154
47155 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47156 {
47157 - char *page = nd_get_link(nd);
47158 + const char *page = nd_get_link(nd);
47159 if (!IS_ERR(page))
47160 free_page((unsigned long)page);
47161 }
47162 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
47163 index c175b4d..8f36a16 100644
47164 --- a/fs/udf/misc.c
47165 +++ b/fs/udf/misc.c
47166 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
47167
47168 u8 udf_tag_checksum(const struct tag *t)
47169 {
47170 - u8 *data = (u8 *)t;
47171 + const u8 *data = (const u8 *)t;
47172 u8 checksum = 0;
47173 int i;
47174 for (i = 0; i < sizeof(struct tag); ++i)
47175 diff --git a/fs/utimes.c b/fs/utimes.c
47176 index ba653f3..06ea4b1 100644
47177 --- a/fs/utimes.c
47178 +++ b/fs/utimes.c
47179 @@ -1,6 +1,7 @@
47180 #include <linux/compiler.h>
47181 #include <linux/file.h>
47182 #include <linux/fs.h>
47183 +#include <linux/security.h>
47184 #include <linux/linkage.h>
47185 #include <linux/mount.h>
47186 #include <linux/namei.h>
47187 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
47188 goto mnt_drop_write_and_out;
47189 }
47190 }
47191 +
47192 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
47193 + error = -EACCES;
47194 + goto mnt_drop_write_and_out;
47195 + }
47196 +
47197 mutex_lock(&inode->i_mutex);
47198 error = notify_change(path->dentry, &newattrs);
47199 mutex_unlock(&inode->i_mutex);
47200 diff --git a/fs/xattr.c b/fs/xattr.c
47201 index 67583de..c5aad14 100644
47202 --- a/fs/xattr.c
47203 +++ b/fs/xattr.c
47204 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
47205 * Extended attribute SET operations
47206 */
47207 static long
47208 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
47209 +setxattr(struct path *path, const char __user *name, const void __user *value,
47210 size_t size, int flags)
47211 {
47212 int error;
47213 @@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
47214 return PTR_ERR(kvalue);
47215 }
47216
47217 - error = vfs_setxattr(d, kname, kvalue, size, flags);
47218 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
47219 + error = -EACCES;
47220 + goto out;
47221 + }
47222 +
47223 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47224 +out:
47225 kfree(kvalue);
47226 return error;
47227 }
47228 @@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
47229 return error;
47230 error = mnt_want_write(path.mnt);
47231 if (!error) {
47232 - error = setxattr(path.dentry, name, value, size, flags);
47233 + error = setxattr(&path, name, value, size, flags);
47234 mnt_drop_write(path.mnt);
47235 }
47236 path_put(&path);
47237 @@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
47238 return error;
47239 error = mnt_want_write(path.mnt);
47240 if (!error) {
47241 - error = setxattr(path.dentry, name, value, size, flags);
47242 + error = setxattr(&path, name, value, size, flags);
47243 mnt_drop_write(path.mnt);
47244 }
47245 path_put(&path);
47246 @@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
47247 const void __user *,value, size_t, size, int, flags)
47248 {
47249 struct file *f;
47250 - struct dentry *dentry;
47251 int error = -EBADF;
47252
47253 f = fget(fd);
47254 if (!f)
47255 return error;
47256 - dentry = f->f_path.dentry;
47257 - audit_inode(NULL, dentry);
47258 + audit_inode(NULL, f->f_path.dentry);
47259 error = mnt_want_write_file(f);
47260 if (!error) {
47261 - error = setxattr(dentry, name, value, size, flags);
47262 + error = setxattr(&f->f_path, name, value, size, flags);
47263 mnt_drop_write(f->f_path.mnt);
47264 }
47265 fput(f);
47266 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
47267 index 8d5a506..7f62712 100644
47268 --- a/fs/xattr_acl.c
47269 +++ b/fs/xattr_acl.c
47270 @@ -17,8 +17,8 @@
47271 struct posix_acl *
47272 posix_acl_from_xattr(const void *value, size_t size)
47273 {
47274 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47275 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47276 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47277 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47278 int count;
47279 struct posix_acl *acl;
47280 struct posix_acl_entry *acl_e;
47281 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
47282 index d0ab788..827999b 100644
47283 --- a/fs/xfs/xfs_bmap.c
47284 +++ b/fs/xfs/xfs_bmap.c
47285 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
47286 int nmap,
47287 int ret_nmap);
47288 #else
47289 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47290 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47291 #endif /* DEBUG */
47292
47293 STATIC int
47294 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
47295 index 79d05e8..e3e5861 100644
47296 --- a/fs/xfs/xfs_dir2_sf.c
47297 +++ b/fs/xfs/xfs_dir2_sf.c
47298 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
47299 }
47300
47301 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
47302 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47303 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47304 + char name[sfep->namelen];
47305 + memcpy(name, sfep->name, sfep->namelen);
47306 + if (filldir(dirent, name, sfep->namelen,
47307 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
47308 + *offset = off & 0x7fffffff;
47309 + return 0;
47310 + }
47311 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47312 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47313 *offset = off & 0x7fffffff;
47314 return 0;
47315 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
47316 index d99a905..9f88202 100644
47317 --- a/fs/xfs/xfs_ioctl.c
47318 +++ b/fs/xfs/xfs_ioctl.c
47319 @@ -128,7 +128,7 @@ xfs_find_handle(
47320 }
47321
47322 error = -EFAULT;
47323 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47324 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47325 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47326 goto out_put;
47327
47328 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
47329 index 23ce927..e274cc1 100644
47330 --- a/fs/xfs/xfs_iops.c
47331 +++ b/fs/xfs/xfs_iops.c
47332 @@ -447,7 +447,7 @@ xfs_vn_put_link(
47333 struct nameidata *nd,
47334 void *p)
47335 {
47336 - char *s = nd_get_link(nd);
47337 + const char *s = nd_get_link(nd);
47338
47339 if (!IS_ERR(s))
47340 kfree(s);
47341 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
47342 new file mode 100644
47343 index 0000000..41df561
47344 --- /dev/null
47345 +++ b/grsecurity/Kconfig
47346 @@ -0,0 +1,1075 @@
47347 +#
47348 +# grecurity configuration
47349 +#
47350 +
47351 +menu "Grsecurity"
47352 +
47353 +config GRKERNSEC
47354 + bool "Grsecurity"
47355 + select CRYPTO
47356 + select CRYPTO_SHA256
47357 + help
47358 + If you say Y here, you will be able to configure many features
47359 + that will enhance the security of your system. It is highly
47360 + recommended that you say Y here and read through the help
47361 + for each option so that you fully understand the features and
47362 + can evaluate their usefulness for your machine.
47363 +
47364 +choice
47365 + prompt "Security Level"
47366 + depends on GRKERNSEC
47367 + default GRKERNSEC_CUSTOM
47368 +
47369 +config GRKERNSEC_LOW
47370 + bool "Low"
47371 + select GRKERNSEC_LINK
47372 + select GRKERNSEC_FIFO
47373 + select GRKERNSEC_RANDNET
47374 + select GRKERNSEC_DMESG
47375 + select GRKERNSEC_CHROOT
47376 + select GRKERNSEC_CHROOT_CHDIR
47377 +
47378 + help
47379 + If you choose this option, several of the grsecurity options will
47380 + be enabled that will give you greater protection against a number
47381 + of attacks, while assuring that none of your software will have any
47382 + conflicts with the additional security measures. If you run a lot
47383 + of unusual software, or you are having problems with the higher
47384 + security levels, you should say Y here. With this option, the
47385 + following features are enabled:
47386 +
47387 + - Linking restrictions
47388 + - FIFO restrictions
47389 + - Restricted dmesg
47390 + - Enforced chdir("/") on chroot
47391 + - Runtime module disabling
47392 +
47393 +config GRKERNSEC_MEDIUM
47394 + bool "Medium"
47395 + select PAX
47396 + select PAX_EI_PAX
47397 + select PAX_PT_PAX_FLAGS
47398 + select PAX_HAVE_ACL_FLAGS
47399 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47400 + select GRKERNSEC_CHROOT
47401 + select GRKERNSEC_CHROOT_SYSCTL
47402 + select GRKERNSEC_LINK
47403 + select GRKERNSEC_FIFO
47404 + select GRKERNSEC_DMESG
47405 + select GRKERNSEC_RANDNET
47406 + select GRKERNSEC_FORKFAIL
47407 + select GRKERNSEC_TIME
47408 + select GRKERNSEC_SIGNAL
47409 + select GRKERNSEC_CHROOT
47410 + select GRKERNSEC_CHROOT_UNIX
47411 + select GRKERNSEC_CHROOT_MOUNT
47412 + select GRKERNSEC_CHROOT_PIVOT
47413 + select GRKERNSEC_CHROOT_DOUBLE
47414 + select GRKERNSEC_CHROOT_CHDIR
47415 + select GRKERNSEC_CHROOT_MKNOD
47416 + select GRKERNSEC_PROC
47417 + select GRKERNSEC_PROC_USERGROUP
47418 + select PAX_RANDUSTACK
47419 + select PAX_ASLR
47420 + select PAX_RANDMMAP
47421 + select PAX_REFCOUNT if (X86 || SPARC64)
47422 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
47423 +
47424 + help
47425 + If you say Y here, several features in addition to those included
47426 + in the low additional security level will be enabled. These
47427 + features provide even more security to your system, though in rare
47428 + cases they may be incompatible with very old or poorly written
47429 + software. If you enable this option, make sure that your auth
47430 + service (identd) is running as gid 1001. With this option,
47431 + the following features (in addition to those provided in the
47432 + low additional security level) will be enabled:
47433 +
47434 + - Failed fork logging
47435 + - Time change logging
47436 + - Signal logging
47437 + - Deny mounts in chroot
47438 + - Deny double chrooting
47439 + - Deny sysctl writes in chroot
47440 + - Deny mknod in chroot
47441 + - Deny access to abstract AF_UNIX sockets out of chroot
47442 + - Deny pivot_root in chroot
47443 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
47444 + - /proc restrictions with special GID set to 10 (usually wheel)
47445 + - Address Space Layout Randomization (ASLR)
47446 + - Prevent exploitation of most refcount overflows
47447 + - Bounds checking of copying between the kernel and userland
47448 +
47449 +config GRKERNSEC_HIGH
47450 + bool "High"
47451 + select GRKERNSEC_LINK
47452 + select GRKERNSEC_FIFO
47453 + select GRKERNSEC_DMESG
47454 + select GRKERNSEC_FORKFAIL
47455 + select GRKERNSEC_TIME
47456 + select GRKERNSEC_SIGNAL
47457 + select GRKERNSEC_CHROOT
47458 + select GRKERNSEC_CHROOT_SHMAT
47459 + select GRKERNSEC_CHROOT_UNIX
47460 + select GRKERNSEC_CHROOT_MOUNT
47461 + select GRKERNSEC_CHROOT_FCHDIR
47462 + select GRKERNSEC_CHROOT_PIVOT
47463 + select GRKERNSEC_CHROOT_DOUBLE
47464 + select GRKERNSEC_CHROOT_CHDIR
47465 + select GRKERNSEC_CHROOT_MKNOD
47466 + select GRKERNSEC_CHROOT_CAPS
47467 + select GRKERNSEC_CHROOT_SYSCTL
47468 + select GRKERNSEC_CHROOT_FINDTASK
47469 + select GRKERNSEC_SYSFS_RESTRICT
47470 + select GRKERNSEC_PROC
47471 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47472 + select GRKERNSEC_HIDESYM
47473 + select GRKERNSEC_BRUTE
47474 + select GRKERNSEC_PROC_USERGROUP
47475 + select GRKERNSEC_KMEM
47476 + select GRKERNSEC_RESLOG
47477 + select GRKERNSEC_RANDNET
47478 + select GRKERNSEC_PROC_ADD
47479 + select GRKERNSEC_CHROOT_CHMOD
47480 + select GRKERNSEC_CHROOT_NICE
47481 + select GRKERNSEC_SETXID
47482 + select GRKERNSEC_AUDIT_MOUNT
47483 + select GRKERNSEC_MODHARDEN if (MODULES)
47484 + select GRKERNSEC_HARDEN_PTRACE
47485 + select GRKERNSEC_PTRACE_READEXEC
47486 + select GRKERNSEC_VM86 if (X86_32)
47487 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
47488 + select PAX
47489 + select PAX_RANDUSTACK
47490 + select PAX_ASLR
47491 + select PAX_RANDMMAP
47492 + select PAX_NOEXEC
47493 + select PAX_MPROTECT
47494 + select PAX_EI_PAX
47495 + select PAX_PT_PAX_FLAGS
47496 + select PAX_HAVE_ACL_FLAGS
47497 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
47498 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
47499 + select PAX_RANDKSTACK if (X86_TSC && X86)
47500 + select PAX_SEGMEXEC if (X86_32)
47501 + select PAX_PAGEEXEC
47502 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
47503 + select PAX_EMUTRAMP if (PARISC)
47504 + select PAX_EMUSIGRT if (PARISC)
47505 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
47506 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
47507 + select PAX_REFCOUNT if (X86 || SPARC64)
47508 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
47509 + help
47510 + If you say Y here, many of the features of grsecurity will be
47511 + enabled, which will protect you against many kinds of attacks
47512 + against your system. The heightened security comes at a cost
47513 + of an increased chance of incompatibilities with rare software
47514 + on your machine. Since this security level enables PaX, you should
47515 + view <http://pax.grsecurity.net> and read about the PaX
47516 + project. While you are there, download chpax and run it on
47517 + binaries that cause problems with PaX. Also remember that
47518 + since the /proc restrictions are enabled, you must run your
47519 + identd as gid 1001. This security level enables the following
47520 + features in addition to those listed in the low and medium
47521 + security levels:
47522 +
47523 + - Additional /proc restrictions
47524 + - Chmod restrictions in chroot
47525 + - No signals, ptrace, or viewing of processes outside of chroot
47526 + - Capability restrictions in chroot
47527 + - Deny fchdir out of chroot
47528 + - Priority restrictions in chroot
47529 + - Segmentation-based implementation of PaX
47530 + - Mprotect restrictions
47531 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
47532 + - Kernel stack randomization
47533 + - Mount/unmount/remount logging
47534 + - Kernel symbol hiding
47535 + - Hardening of module auto-loading
47536 + - Ptrace restrictions
47537 + - Restricted vm86 mode
47538 + - Restricted sysfs/debugfs
47539 + - Active kernel exploit response
47540 +
47541 +config GRKERNSEC_CUSTOM
47542 + bool "Custom"
47543 + help
47544 + If you say Y here, you will be able to configure every grsecurity
47545 + option, which allows you to enable many more features that aren't
47546 + covered in the basic security levels. These additional features
47547 + include TPE, socket restrictions, and the sysctl system for
47548 + grsecurity. It is advised that you read through the help for
47549 + each option to determine its usefulness in your situation.
47550 +
47551 +endchoice
47552 +
47553 +menu "Memory Protections"
47554 +depends on GRKERNSEC
47555 +
47556 +config GRKERNSEC_KMEM
47557 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
47558 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
47559 + help
47560 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
47561 + be written to or read from to modify or leak the contents of the running
47562 + kernel. /dev/port will also not be allowed to be opened. If you have module
47563 + support disabled, enabling this will close up four ways that are
47564 + currently used to insert malicious code into the running kernel.
47565 + Even with all these features enabled, we still highly recommend that
47566 + you use the RBAC system, as it is still possible for an attacker to
47567 + modify the running kernel through privileged I/O granted by ioperm/iopl.
47568 + If you are not using XFree86, you may be able to stop this additional
47569 + case by enabling the 'Disable privileged I/O' option. Though nothing
47570 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
47571 + but only to video memory, which is the only writing we allow in this
47572 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
47573 + not be allowed to mprotect it with PROT_WRITE later.
47574 + It is highly recommended that you say Y here if you meet all the
47575 + conditions above.
47576 +
47577 +config GRKERNSEC_VM86
47578 + bool "Restrict VM86 mode"
47579 + depends on X86_32
47580 +
47581 + help
47582 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
47583 + make use of a special execution mode on 32bit x86 processors called
47584 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
47585 + video cards and will still work with this option enabled. The purpose
47586 + of the option is to prevent exploitation of emulation errors in
47587 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
47588 + Nearly all users should be able to enable this option.
47589 +
47590 +config GRKERNSEC_IO
47591 + bool "Disable privileged I/O"
47592 + depends on X86
47593 + select RTC_CLASS
47594 + select RTC_INTF_DEV
47595 + select RTC_DRV_CMOS
47596 +
47597 + help
47598 + If you say Y here, all ioperm and iopl calls will return an error.
47599 + Ioperm and iopl can be used to modify the running kernel.
47600 + Unfortunately, some programs need this access to operate properly,
47601 + the most notable of which are XFree86 and hwclock. hwclock can be
47602 + remedied by having RTC support in the kernel, so real-time
47603 + clock support is enabled if this option is enabled, to ensure
47604 + that hwclock operates correctly. XFree86 still will not
47605 + operate correctly with this option enabled, so DO NOT CHOOSE Y
47606 + IF YOU USE XFree86. If you use XFree86 and you still want to
47607 + protect your kernel against modification, use the RBAC system.
47608 +
47609 +config GRKERNSEC_PROC_MEMMAP
47610 + bool "Harden ASLR against information leaks and entropy reduction"
47611 + default y if (PAX_NOEXEC || PAX_ASLR)
47612 + depends on PAX_NOEXEC || PAX_ASLR
47613 + help
47614 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
47615 + give no information about the addresses of its mappings if
47616 + PaX features that rely on random addresses are enabled on the task.
47617 + In addition to sanitizing this information and disabling other
47618 + dangerous sources of information, this option denies reads of sensitive
47619 + /proc/<pid> entries where the file descriptor was opened in a different
47620 + task than the one performing the read. Such attempts are logged.
47621 + Finally, this option limits argv/env strings for suid/sgid binaries
47622 + to 1MB to prevent a complete exhaustion of the stack entropy provided
47623 + by ASLR.
47624 + If you use PaX it is essential that you say Y here as it closes up
47625 + several holes that make full ASLR useless for suid/sgid binaries.
47626 +
47627 +config GRKERNSEC_BRUTE
47628 + bool "Deter exploit bruteforcing"
47629 + help
47630 + If you say Y here, attempts to bruteforce exploits against forking
47631 + daemons such as apache or sshd, as well as against suid/sgid binaries
47632 + will be deterred. When a child of a forking daemon is killed by PaX
47633 + or crashes due to an illegal instruction or other suspicious signal,
47634 + the parent process will be delayed 30 seconds upon every subsequent
47635 + fork until the administrator is able to assess the situation and
47636 + restart the daemon.
47637 + In the suid/sgid case, the attempt is logged, the user has all their
47638 + processes terminated, and they are prevented from executing any further
47639 + processes for 15 minutes.
47640 + It is recommended that you also enable signal logging in the auditing
47641 + section so that logs are generated when a process triggers a suspicious
47642 + signal.
47643 + If the sysctl option is enabled, a sysctl option with name
47644 + "deter_bruteforce" is created.
47645 +
47646 +
47647 +config GRKERNSEC_MODHARDEN
47648 + bool "Harden module auto-loading"
47649 + depends on MODULES
47650 + help
47651 + If you say Y here, module auto-loading in response to use of some
47652 + feature implemented by an unloaded module will be restricted to
47653 + root users. Enabling this option helps defend against attacks
47654 + by unprivileged users who abuse the auto-loading behavior to
47655 + cause a vulnerable module to load that is then exploited.
47656 +
47657 + If this option prevents a legitimate use of auto-loading for a
47658 + non-root user, the administrator can execute modprobe manually
47659 + with the exact name of the module mentioned in the alert log.
47660 + Alternatively, the administrator can add the module to the list
47661 + of modules loaded at boot by modifying init scripts.
47662 +
47663 + Modification of init scripts will most likely be needed on
47664 + Ubuntu servers with encrypted home directory support enabled,
47665 + as the first non-root user logging in will cause the ecb(aes),
47666 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
47667 +
47668 +config GRKERNSEC_HIDESYM
47669 + bool "Hide kernel symbols"
47670 + help
47671 + If you say Y here, getting information on loaded modules, and
47672 + displaying all kernel symbols through a syscall will be restricted
47673 + to users with CAP_SYS_MODULE. For software compatibility reasons,
47674 + /proc/kallsyms will be restricted to the root user. The RBAC
47675 + system can hide that entry even from root.
47676 +
47677 + This option also prevents leaking of kernel addresses through
47678 + several /proc entries.
47679 +
47680 + Note that this option is only effective provided the following
47681 + conditions are met:
47682 + 1) The kernel using grsecurity is not precompiled by some distribution
47683 + 2) You have also enabled GRKERNSEC_DMESG
47684 + 3) You are using the RBAC system and hiding other files such as your
47685 + kernel image and System.map. Alternatively, enabling this option
47686 + causes the permissions on /boot, /lib/modules, and the kernel
47687 + source directory to change at compile time to prevent
47688 + reading by non-root users.
47689 + If the above conditions are met, this option will aid in providing a
47690 + useful protection against local kernel exploitation of overflows
47691 + and arbitrary read/write vulnerabilities.
47692 +
47693 +config GRKERNSEC_KERN_LOCKOUT
47694 + bool "Active kernel exploit response"
47695 + depends on X86 || ARM || PPC || SPARC
47696 + help
47697 + If you say Y here, when a PaX alert is triggered due to suspicious
47698 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
47699 + or an OOPs occurs due to bad memory accesses, instead of just
47700 + terminating the offending process (and potentially allowing
47701 + a subsequent exploit from the same user), we will take one of two
47702 + actions:
47703 + If the user was root, we will panic the system
47704 + If the user was non-root, we will log the attempt, terminate
47705 + all processes owned by the user, then prevent them from creating
47706 + any new processes until the system is restarted
47707 + This deters repeated kernel exploitation/bruteforcing attempts
47708 + and is useful for later forensics.
47709 +
47710 +endmenu
47711 +menu "Role Based Access Control Options"
47712 +depends on GRKERNSEC
47713 +
47714 +config GRKERNSEC_RBAC_DEBUG
47715 + bool
47716 +
47717 +config GRKERNSEC_NO_RBAC
47718 + bool "Disable RBAC system"
47719 + help
47720 + If you say Y here, the /dev/grsec device will be removed from the kernel,
47721 + preventing the RBAC system from being enabled. You should only say Y
47722 + here if you have no intention of using the RBAC system, so as to prevent
47723 + an attacker with root access from misusing the RBAC system to hide files
47724 + and processes when loadable module support and /dev/[k]mem have been
47725 + locked down.
47726 +
47727 +config GRKERNSEC_ACL_HIDEKERN
47728 + bool "Hide kernel processes"
47729 + help
47730 + If you say Y here, all kernel threads will be hidden to all
47731 + processes but those whose subject has the "view hidden processes"
47732 + flag.
47733 +
47734 +config GRKERNSEC_ACL_MAXTRIES
47735 + int "Maximum tries before password lockout"
47736 + default 3
47737 + help
47738 + This option enforces the maximum number of times a user can attempt
47739 + to authorize themselves with the grsecurity RBAC system before being
47740 + denied the ability to attempt authorization again for a specified time.
47741 + The lower the number, the harder it will be to brute-force a password.
47742 +
47743 +config GRKERNSEC_ACL_TIMEOUT
47744 + int "Time to wait after max password tries, in seconds"
47745 + default 30
47746 + help
47747 + This option specifies the time the user must wait after attempting to
47748 + authorize to the RBAC system with the maximum number of invalid
47749 + passwords. The higher the number, the harder it will be to brute-force
47750 + a password.
47751 +
47752 +endmenu
47753 +menu "Filesystem Protections"
47754 +depends on GRKERNSEC
47755 +
47756 +config GRKERNSEC_PROC
47757 + bool "Proc restrictions"
47758 + help
47759 + If you say Y here, the permissions of the /proc filesystem
47760 + will be altered to enhance system security and privacy. You MUST
47761 + choose either a user only restriction or a user and group restriction.
47762 + Depending upon the option you choose, you can either restrict users to
47763 + see only the processes they themselves run, or choose a group that can
47764 + view all processes and files normally restricted to root if you choose
47765 + the "restrict to user only" option. NOTE: If you're running identd as
47766 + a non-root user, you will have to run it as the group you specify here.
47767 +
47768 +config GRKERNSEC_PROC_USER
47769 + bool "Restrict /proc to user only"
47770 + depends on GRKERNSEC_PROC
47771 + help
47772 + If you say Y here, non-root users will only be able to view their own
47773 + processes, and restricts them from viewing network-related information,
47774 + and viewing kernel symbol and module information.
47775 +
47776 +config GRKERNSEC_PROC_USERGROUP
47777 + bool "Allow special group"
47778 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
47779 + help
47780 + If you say Y here, you will be able to select a group that will be
47781 + able to view all processes and network-related information. If you've
47782 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
47783 + remain hidden. This option is useful if you want to run identd as
47784 + a non-root user.
47785 +
47786 +config GRKERNSEC_PROC_GID
47787 + int "GID for special group"
47788 + depends on GRKERNSEC_PROC_USERGROUP
47789 + default 1001
47790 +
47791 +config GRKERNSEC_PROC_ADD
47792 + bool "Additional restrictions"
47793 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
47794 + help
47795 + If you say Y here, additional restrictions will be placed on
47796 + /proc that keep normal users from viewing device information and
47797 + slabinfo information that could be useful for exploits.
47798 +
47799 +config GRKERNSEC_LINK
47800 + bool "Linking restrictions"
47801 + help
47802 + If you say Y here, /tmp race exploits will be prevented, since users
47803 + will no longer be able to follow symlinks owned by other users in
47804 + world-writable +t directories (e.g. /tmp), unless the owner of the
47805 + symlink is the owner of the directory. Users will also not be
47806 + able to hardlink to files they do not own. If the sysctl option is
47807 + enabled, a sysctl option with name "linking_restrictions" is created.
47808 +
47809 +config GRKERNSEC_FIFO
47810 + bool "FIFO restrictions"
47811 + help
47812 + If you say Y here, users will not be able to write to FIFOs they don't
47813 + own in world-writable +t directories (e.g. /tmp), unless the owner of
47814 + the FIFO is the same owner of the directory it's held in. If the sysctl
47815 + option is enabled, a sysctl option with name "fifo_restrictions" is
47816 + created.
47817 +
47818 +config GRKERNSEC_SYSFS_RESTRICT
47819 + bool "Sysfs/debugfs restriction"
47820 + depends on SYSFS
47821 + help
47822 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
47823 + any filesystem normally mounted under it (e.g. debugfs) will be
47824 + mostly accessible only by root. These filesystems generally provide access
47825 + to hardware and debug information that isn't appropriate for unprivileged
47826 + users of the system. Sysfs and debugfs have also become a large source
47827 + of new vulnerabilities, ranging from infoleaks to local compromise.
47828 + There has been very little oversight with an eye toward security involved
47829 + in adding new exporters of information to these filesystems, so their
47830 + use is discouraged.
47831 + For reasons of compatibility, a few directories have been whitelisted
47832 + for access by non-root users:
47833 + /sys/fs/selinux
47834 + /sys/fs/fuse
47835 + /sys/devices/system/cpu
47836 +
47837 +config GRKERNSEC_ROFS
47838 + bool "Runtime read-only mount protection"
47839 + help
47840 + If you say Y here, a sysctl option with name "romount_protect" will
47841 + be created. By setting this option to 1 at runtime, filesystems
47842 + will be protected in the following ways:
47843 + * No new writable mounts will be allowed
47844 + * Existing read-only mounts won't be able to be remounted read/write
47845 + * Write operations will be denied on all block devices
47846 + This option acts independently of grsec_lock: once it is set to 1,
47847 + it cannot be turned off. Therefore, please be mindful of the resulting
47848 + behavior if this option is enabled in an init script on a read-only
47849 + filesystem. This feature is mainly intended for secure embedded systems.
47850 +
47851 +config GRKERNSEC_CHROOT
47852 + bool "Chroot jail restrictions"
47853 + help
47854 + If you say Y here, you will be able to choose several options that will
47855 + make breaking out of a chrooted jail much more difficult. If you
47856 + encounter no software incompatibilities with the following options, it
47857 + is recommended that you enable each one.
47858 +
47859 +config GRKERNSEC_CHROOT_MOUNT
47860 + bool "Deny mounts"
47861 + depends on GRKERNSEC_CHROOT
47862 + help
47863 + If you say Y here, processes inside a chroot will not be able to
47864 + mount or remount filesystems. If the sysctl option is enabled, a
47865 + sysctl option with name "chroot_deny_mount" is created.
47866 +
47867 +config GRKERNSEC_CHROOT_DOUBLE
47868 + bool "Deny double-chroots"
47869 + depends on GRKERNSEC_CHROOT
47870 + help
47871 + If you say Y here, processes inside a chroot will not be able to chroot
47872 + again outside the chroot. This is a widely used method of breaking
47873 + out of a chroot jail and should not be allowed. If the sysctl
47874 + option is enabled, a sysctl option with name
47875 + "chroot_deny_chroot" is created.
47876 +
47877 +config GRKERNSEC_CHROOT_PIVOT
47878 + bool "Deny pivot_root in chroot"
47879 + depends on GRKERNSEC_CHROOT
47880 + help
47881 + If you say Y here, processes inside a chroot will not be able to use
47882 + a function called pivot_root() that was introduced in Linux 2.3.41. It
47883 + works similarly to chroot in that it changes the root filesystem. This
47884 + function could be misused in a chrooted process to attempt to break out
47885 + of the chroot, and therefore should not be allowed. If the sysctl
47886 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
47887 + created.
47888 +
47889 +config GRKERNSEC_CHROOT_CHDIR
47890 + bool "Enforce chdir(\"/\") on all chroots"
47891 + depends on GRKERNSEC_CHROOT
47892 + help
47893 + If you say Y here, the current working directory of all newly-chrooted
47894 + applications will be set to the root directory of the chroot.
47895 + The man page on chroot(2) states:
47896 + Note that this call does not change the current working
47897 + directory, so that `.' can be outside the tree rooted at
47898 + `/'. In particular, the super-user can escape from a
47899 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
47900 +
47901 + It is recommended that you say Y here, since it's not known to break
47902 + any software. If the sysctl option is enabled, a sysctl option with
47903 + name "chroot_enforce_chdir" is created.
47904 +
47905 +config GRKERNSEC_CHROOT_CHMOD
47906 + bool "Deny (f)chmod +s"
47907 + depends on GRKERNSEC_CHROOT
47908 + help
47909 + If you say Y here, processes inside a chroot will not be able to chmod
47910 + or fchmod files to make them have suid or sgid bits. This protects
47911 + against another published method of breaking a chroot. If the sysctl
47912 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
47913 + created.
47914 +
47915 +config GRKERNSEC_CHROOT_FCHDIR
47916 + bool "Deny fchdir out of chroot"
47917 + depends on GRKERNSEC_CHROOT
47918 + help
47919 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
47920 + to a file descriptor of the chrooting process that points to a directory
47921 + outside the filesystem will be stopped. If the sysctl option
47922 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
47923 +
47924 +config GRKERNSEC_CHROOT_MKNOD
47925 + bool "Deny mknod"
47926 + depends on GRKERNSEC_CHROOT
47927 + help
47928 + If you say Y here, processes inside a chroot will not be allowed to
47929 + mknod. The problem with using mknod inside a chroot is that it
47930 + would allow an attacker to create a device entry that is the same
47931 + as one on the physical root of your system, which could range from
47932 + anything from the console device to a device for your harddrive (which
47933 + they could then use to wipe the drive or steal data). It is recommended
47934 + that you say Y here, unless you run into software incompatibilities.
47935 + If the sysctl option is enabled, a sysctl option with name
47936 + "chroot_deny_mknod" is created.
47937 +
47938 +config GRKERNSEC_CHROOT_SHMAT
47939 + bool "Deny shmat() out of chroot"
47940 + depends on GRKERNSEC_CHROOT
47941 + help
47942 + If you say Y here, processes inside a chroot will not be able to attach
47943 + to shared memory segments that were created outside of the chroot jail.
47944 + It is recommended that you say Y here. If the sysctl option is enabled,
47945 + a sysctl option with name "chroot_deny_shmat" is created.
47946 +
47947 +config GRKERNSEC_CHROOT_UNIX
47948 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
47949 + depends on GRKERNSEC_CHROOT
47950 + help
47951 + If you say Y here, processes inside a chroot will not be able to
47952 + connect to abstract (meaning not belonging to a filesystem) Unix
47953 + domain sockets that were bound outside of a chroot. It is recommended
47954 + that you say Y here. If the sysctl option is enabled, a sysctl option
47955 + with name "chroot_deny_unix" is created.
47956 +
47957 +config GRKERNSEC_CHROOT_FINDTASK
47958 + bool "Protect outside processes"
47959 + depends on GRKERNSEC_CHROOT
47960 + help
47961 + If you say Y here, processes inside a chroot will not be able to
47962 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
47963 + getsid, or view any process outside of the chroot. If the sysctl
47964 + option is enabled, a sysctl option with name "chroot_findtask" is
47965 + created.
47966 +
47967 +config GRKERNSEC_CHROOT_NICE
47968 + bool "Restrict priority changes"
47969 + depends on GRKERNSEC_CHROOT
47970 + help
47971 + If you say Y here, processes inside a chroot will not be able to raise
47972 + the priority of processes in the chroot, or alter the priority of
47973 + processes outside the chroot. This provides more security than simply
47974 + removing CAP_SYS_NICE from the process' capability set. If the
47975 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
47976 + is created.
47977 +
47978 +config GRKERNSEC_CHROOT_SYSCTL
47979 + bool "Deny sysctl writes"
47980 + depends on GRKERNSEC_CHROOT
47981 + help
47982 + If you say Y here, an attacker in a chroot will not be able to
47983 + write to sysctl entries, either by sysctl(2) or through a /proc
47984 + interface. It is strongly recommended that you say Y here. If the
47985 + sysctl option is enabled, a sysctl option with name
47986 + "chroot_deny_sysctl" is created.
47987 +
47988 +config GRKERNSEC_CHROOT_CAPS
47989 + bool "Capability restrictions"
47990 + depends on GRKERNSEC_CHROOT
47991 + help
47992 + If you say Y here, the capabilities on all processes within a
47993 + chroot jail will be lowered to stop module insertion, raw i/o,
47994 + system and net admin tasks, rebooting the system, modifying immutable
47995 + files, modifying IPC owned by another, and changing the system time.
47996 + This is left an option because it can break some apps. Disable this
47997 + if your chrooted apps are having problems performing those kinds of
47998 + tasks. If the sysctl option is enabled, a sysctl option with
47999 + name "chroot_caps" is created.
48000 +
48001 +endmenu
48002 +menu "Kernel Auditing"
48003 +depends on GRKERNSEC
48004 +
48005 +config GRKERNSEC_AUDIT_GROUP
48006 + bool "Single group for auditing"
48007 + help
48008 + If you say Y here, the exec, chdir, and (un)mount logging features
48009 + will only operate on a group you specify. This option is recommended
48010 + if you only want to watch certain users instead of having a large
48011 + amount of logs from the entire system. If the sysctl option is enabled,
48012 + a sysctl option with name "audit_group" is created.
48013 +
48014 +config GRKERNSEC_AUDIT_GID
48015 + int "GID for auditing"
48016 + depends on GRKERNSEC_AUDIT_GROUP
48017 + default 1007
48018 +
48019 +config GRKERNSEC_EXECLOG
48020 + bool "Exec logging"
48021 + help
48022 + If you say Y here, all execve() calls will be logged (since the
48023 + other exec*() calls are frontends to execve(), all execution
48024 + will be logged). Useful for shell-servers that like to keep track
48025 + of their users. If the sysctl option is enabled, a sysctl option with
48026 + name "exec_logging" is created.
48027 + WARNING: This option when enabled will produce a LOT of logs, especially
48028 + on an active system.
48029 +
48030 +config GRKERNSEC_RESLOG
48031 + bool "Resource logging"
48032 + help
48033 + If you say Y here, all attempts to overstep resource limits will
48034 + be logged with the resource name, the requested size, and the current
48035 + limit. It is highly recommended that you say Y here. If the sysctl
48036 + option is enabled, a sysctl option with name "resource_logging" is
48037 + created. If the RBAC system is enabled, the sysctl value is ignored.
48038 +
48039 +config GRKERNSEC_CHROOT_EXECLOG
48040 + bool "Log execs within chroot"
48041 + help
48042 + If you say Y here, all executions inside a chroot jail will be logged
48043 + to syslog. This can cause a large amount of logs if certain
48044 + applications (eg. djb's daemontools) are installed on the system, and
48045 + is therefore left as an option. If the sysctl option is enabled, a
48046 + sysctl option with name "chroot_execlog" is created.
48047 +
48048 +config GRKERNSEC_AUDIT_PTRACE
48049 + bool "Ptrace logging"
48050 + help
48051 + If you say Y here, all attempts to attach to a process via ptrace
48052 + will be logged. If the sysctl option is enabled, a sysctl option
48053 + with name "audit_ptrace" is created.
48054 +
48055 +config GRKERNSEC_AUDIT_CHDIR
48056 + bool "Chdir logging"
48057 + help
48058 + If you say Y here, all chdir() calls will be logged. If the sysctl
48059 + option is enabled, a sysctl option with name "audit_chdir" is created.
48060 +
48061 +config GRKERNSEC_AUDIT_MOUNT
48062 + bool "(Un)Mount logging"
48063 + help
48064 + If you say Y here, all mounts and unmounts will be logged. If the
48065 + sysctl option is enabled, a sysctl option with name "audit_mount" is
48066 + created.
48067 +
48068 +config GRKERNSEC_SIGNAL
48069 + bool "Signal logging"
48070 + help
48071 + If you say Y here, certain important signals will be logged, such as
48072 + SIGSEGV, which will as a result inform you of when an error in a program
48073 + occurred, which in some cases could mean a possible exploit attempt.
48074 + If the sysctl option is enabled, a sysctl option with name
48075 + "signal_logging" is created.
48076 +
48077 +config GRKERNSEC_FORKFAIL
48078 + bool "Fork failure logging"
48079 + help
48080 + If you say Y here, all failed fork() attempts will be logged.
48081 + This could suggest a fork bomb, or someone attempting to overstep
48082 + their process limit. If the sysctl option is enabled, a sysctl option
48083 + with name "forkfail_logging" is created.
48084 +
48085 +config GRKERNSEC_TIME
48086 + bool "Time change logging"
48087 + help
48088 + If you say Y here, any changes of the system clock will be logged.
48089 + If the sysctl option is enabled, a sysctl option with name
48090 + "timechange_logging" is created.
48091 +
48092 +config GRKERNSEC_PROC_IPADDR
48093 + bool "/proc/<pid>/ipaddr support"
48094 + help
48095 + If you say Y here, a new entry will be added to each /proc/<pid>
48096 + directory that contains the IP address of the person using the task.
48097 + The IP is carried across local TCP and AF_UNIX stream sockets.
48098 + This information can be useful for IDS/IPSes to perform remote response
48099 + to a local attack. The entry is readable by only the owner of the
48100 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
48101 + the RBAC system), and thus does not create privacy concerns.
48102 +
48103 +config GRKERNSEC_RWXMAP_LOG
48104 + bool 'Denied RWX mmap/mprotect logging'
48105 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
48106 + help
48107 + If you say Y here, calls to mmap() and mprotect() with explicit
48108 + usage of PROT_WRITE and PROT_EXEC together will be logged when
48109 + denied by the PAX_MPROTECT feature. If the sysctl option is
48110 + enabled, a sysctl option with name "rwxmap_logging" is created.
48111 +
48112 +config GRKERNSEC_AUDIT_TEXTREL
48113 + bool 'ELF text relocations logging (READ HELP)'
48114 + depends on PAX_MPROTECT
48115 + help
48116 + If you say Y here, text relocations will be logged with the filename
48117 + of the offending library or binary. The purpose of the feature is
48118 + to help Linux distribution developers get rid of libraries and
48119 + binaries that need text relocations which hinder the future progress
48120 + of PaX. Only Linux distribution developers should say Y here, and
48121 + never on a production machine, as this option creates an information
48122 + leak that could aid an attacker in defeating the randomization of
48123 + a single memory region. If the sysctl option is enabled, a sysctl
48124 + option with name "audit_textrel" is created.
48125 +
48126 +endmenu
48127 +
48128 +menu "Executable Protections"
48129 +depends on GRKERNSEC
48130 +
48131 +config GRKERNSEC_DMESG
48132 + bool "Dmesg(8) restriction"
48133 + help
48134 + If you say Y here, non-root users will not be able to use dmesg(8)
48135 + to view up to the last 4kb of messages in the kernel's log buffer.
48136 + The kernel's log buffer often contains kernel addresses and other
48137 + identifying information useful to an attacker in fingerprinting a
48138 + system for a targeted exploit.
48139 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
48140 + created.
48141 +
48142 +config GRKERNSEC_HARDEN_PTRACE
48143 + bool "Deter ptrace-based process snooping"
48144 + help
48145 + If you say Y here, TTY sniffers and other malicious monitoring
48146 + programs implemented through ptrace will be defeated. If you
48147 + have been using the RBAC system, this option has already been
48148 + enabled for several years for all users, with the ability to make
48149 + fine-grained exceptions.
48150 +
48151 + This option only affects the ability of non-root users to ptrace
48152 + processes that are not a descendent of the ptracing process.
48153 + This means that strace ./binary and gdb ./binary will still work,
48154 + but attaching to arbitrary processes will not. If the sysctl
48155 + option is enabled, a sysctl option with name "harden_ptrace" is
48156 + created.
48157 +
48158 +config GRKERNSEC_PTRACE_READEXEC
48159 + bool "Require read access to ptrace sensitive binaries"
48160 + help
48161 + If you say Y here, unprivileged users will not be able to ptrace unreadable
48162 + binaries. This option is useful in environments that
48163 + remove the read bits (e.g. file mode 4711) from suid binaries to
48164 + prevent infoleaking of their contents. This option adds
48165 + consistency to the use of that file mode, as the binary could normally
48166 + be read out when run without privileges while ptracing.
48167 +
48168 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
48169 + is created.
48170 +
48171 +config GRKERNSEC_SETXID
48172 + bool "Enforce consistent multithreaded privileges"
48173 + help
48174 + If you say Y here, a change from a root uid to a non-root uid
48175 + in a multithreaded application will cause the resulting uids,
48176 + gids, supplementary groups, and capabilities in that thread
48177 + to be propagated to the other threads of the process. In most
48178 + cases this is unnecessary, as glibc will emulate this behavior
48179 + on behalf of the application. Other libcs do not act in the
48180 + same way, allowing the other threads of the process to continue
48181 + running with root privileges. If the sysctl option is enabled,
48182 + a sysctl option with name "consistent_setxid" is created.
48183 +
48184 +config GRKERNSEC_TPE
48185 + bool "Trusted Path Execution (TPE)"
48186 + help
48187 + If you say Y here, you will be able to choose a gid to add to the
48188 + supplementary groups of users you want to mark as "untrusted."
48189 + These users will not be able to execute any files that are not in
48190 + root-owned directories writable only by root. If the sysctl option
48191 + is enabled, a sysctl option with name "tpe" is created.
48192 +
48193 +config GRKERNSEC_TPE_ALL
48194 + bool "Partially restrict all non-root users"
48195 + depends on GRKERNSEC_TPE
48196 + help
48197 + If you say Y here, all non-root users will be covered under
48198 + a weaker TPE restriction. This is separate from, and in addition to,
48199 + the main TPE options that you have selected elsewhere. Thus, if a
48200 + "trusted" GID is chosen, this restriction applies to even that GID.
48201 + Under this restriction, all non-root users will only be allowed to
48202 + execute files in directories they own that are not group or
48203 + world-writable, or in directories owned by root and writable only by
48204 + root. If the sysctl option is enabled, a sysctl option with name
48205 + "tpe_restrict_all" is created.
48206 +
48207 +config GRKERNSEC_TPE_INVERT
48208 + bool "Invert GID option"
48209 + depends on GRKERNSEC_TPE
48210 + help
48211 + If you say Y here, the group you specify in the TPE configuration will
48212 + decide what group TPE restrictions will be *disabled* for. This
48213 + option is useful if you want TPE restrictions to be applied to most
48214 + users on the system. If the sysctl option is enabled, a sysctl option
48215 + with name "tpe_invert" is created. Unlike other sysctl options, this
48216 + entry will default to on for backward-compatibility.
48217 +
48218 +config GRKERNSEC_TPE_GID
48219 + int "GID for untrusted users"
48220 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
48221 + default 1005
48222 + help
48223 + Setting this GID determines what group TPE restrictions will be
48224 + *enabled* for. If the sysctl option is enabled, a sysctl option
48225 + with name "tpe_gid" is created.
48226 +
48227 +config GRKERNSEC_TPE_GID
48228 + int "GID for trusted users"
48229 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
48230 + default 1005
48231 + help
48232 + Setting this GID determines what group TPE restrictions will be
48233 + *disabled* for. If the sysctl option is enabled, a sysctl option
48234 + with name "tpe_gid" is created.
48235 +
48236 +endmenu
48237 +menu "Network Protections"
48238 +depends on GRKERNSEC
48239 +
48240 +config GRKERNSEC_RANDNET
48241 + bool "Larger entropy pools"
48242 + help
48243 + If you say Y here, the entropy pools used for many features of Linux
48244 + and grsecurity will be doubled in size. Since several grsecurity
48245 + features use additional randomness, it is recommended that you say Y
48246 + here. Saying Y here has a similar effect as modifying
48247 + /proc/sys/kernel/random/poolsize.
48248 +
48249 +config GRKERNSEC_BLACKHOLE
48250 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
48251 + depends on NET
48252 + help
48253 + If you say Y here, neither TCP resets nor ICMP
48254 + destination-unreachable packets will be sent in response to packets
48255 + sent to ports for which no associated listening process exists.
48256 + This feature supports both IPV4 and IPV6 and exempts the
48257 + loopback interface from blackholing. Enabling this feature
48258 + makes a host more resilient to DoS attacks and reduces network
48259 + visibility against scanners.
48260 +
48261 + The blackhole feature as-implemented is equivalent to the FreeBSD
48262 + blackhole feature, as it prevents RST responses to all packets, not
48263 + just SYNs. Under most application behavior this causes no
48264 + problems, but applications (like haproxy) may not close certain
48265 + connections in a way that cleanly terminates them on the remote
48266 + end, leaving the remote host in LAST_ACK state. Because of this
48267 + side-effect and to prevent intentional LAST_ACK DoSes, this
48268 + feature also adds automatic mitigation against such attacks.
48269 + The mitigation drastically reduces the amount of time a socket
48270 + can spend in LAST_ACK state. If you're using haproxy and not
48271 + all servers it connects to have this option enabled, consider
48272 + disabling this feature on the haproxy host.
48273 +
48274 + If the sysctl option is enabled, two sysctl options with names
48275 + "ip_blackhole" and "lastack_retries" will be created.
48276 + While "ip_blackhole" takes the standard zero/non-zero on/off
48277 + toggle, "lastack_retries" uses the same kinds of values as
48278 + "tcp_retries1" and "tcp_retries2". The default value of 4
48279 + prevents a socket from lasting more than 45 seconds in LAST_ACK
48280 + state.
48281 +
48282 +config GRKERNSEC_SOCKET
48283 + bool "Socket restrictions"
48284 + depends on NET
48285 + help
48286 + If you say Y here, you will be able to choose from several options.
48287 + If you assign a GID on your system and add it to the supplementary
48288 + groups of users you want to restrict socket access to, this patch
48289 + will perform up to three things, based on the option(s) you choose.
48290 +
48291 +config GRKERNSEC_SOCKET_ALL
48292 + bool "Deny any sockets to group"
48293 + depends on GRKERNSEC_SOCKET
48294 + help
48295 + If you say Y here, you will be able to choose a GID of whose users will
48296 + be unable to connect to other hosts from your machine or run server
48297 + applications from your machine. If the sysctl option is enabled, a
48298 + sysctl option with name "socket_all" is created.
48299 +
48300 +config GRKERNSEC_SOCKET_ALL_GID
48301 + int "GID to deny all sockets for"
48302 + depends on GRKERNSEC_SOCKET_ALL
48303 + default 1004
48304 + help
48305 + Here you can choose the GID to disable socket access for. Remember to
48306 + add the users you want socket access disabled for to the GID
48307 + specified here. If the sysctl option is enabled, a sysctl option
48308 + with name "socket_all_gid" is created.
48309 +
48310 +config GRKERNSEC_SOCKET_CLIENT
48311 + bool "Deny client sockets to group"
48312 + depends on GRKERNSEC_SOCKET
48313 + help
48314 + If you say Y here, you will be able to choose a GID of whose users will
48315 + be unable to connect to other hosts from your machine, but will be
48316 + able to run servers. If this option is enabled, all users in the group
48317 + you specify will have to use passive mode when initiating ftp transfers
48318 + from the shell on your machine. If the sysctl option is enabled, a
48319 + sysctl option with name "socket_client" is created.
48320 +
48321 +config GRKERNSEC_SOCKET_CLIENT_GID
48322 + int "GID to deny client sockets for"
48323 + depends on GRKERNSEC_SOCKET_CLIENT
48324 + default 1003
48325 + help
48326 + Here you can choose the GID to disable client socket access for.
48327 + Remember to add the users you want client socket access disabled for to
48328 + the GID specified here. If the sysctl option is enabled, a sysctl
48329 + option with name "socket_client_gid" is created.
48330 +
48331 +config GRKERNSEC_SOCKET_SERVER
48332 + bool "Deny server sockets to group"
48333 + depends on GRKERNSEC_SOCKET
48334 + help
48335 + If you say Y here, you will be able to choose a GID of whose users will
48336 + be unable to run server applications from your machine. If the sysctl
48337 + option is enabled, a sysctl option with name "socket_server" is created.
48338 +
48339 +config GRKERNSEC_SOCKET_SERVER_GID
48340 + int "GID to deny server sockets for"
48341 + depends on GRKERNSEC_SOCKET_SERVER
48342 + default 1002
48343 + help
48344 + Here you can choose the GID to disable server socket access for.
48345 + Remember to add the users you want server socket access disabled for to
48346 + the GID specified here. If the sysctl option is enabled, a sysctl
48347 + option with name "socket_server_gid" is created.
48348 +
48349 +endmenu
48350 +menu "Sysctl support"
48351 +depends on GRKERNSEC && SYSCTL
48352 +
48353 +config GRKERNSEC_SYSCTL
48354 + bool "Sysctl support"
48355 + help
48356 + If you say Y here, you will be able to change the options that
48357 + grsecurity runs with at bootup, without having to recompile your
48358 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
48359 + to enable (1) or disable (0) various features. All the sysctl entries
48360 + are mutable until the "grsec_lock" entry is set to a non-zero value.
48361 + All features enabled in the kernel configuration are disabled at boot
48362 + if you do not say Y to the "Turn on features by default" option.
48363 + All options should be set at startup, and the grsec_lock entry should
48364 + be set to a non-zero value after all the options are set.
48365 + *THIS IS EXTREMELY IMPORTANT*
48366 +
48367 +config GRKERNSEC_SYSCTL_DISTRO
48368 + bool "Extra sysctl support for distro makers (READ HELP)"
48369 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
48370 + help
48371 + If you say Y here, additional sysctl options will be created
48372 + for features that affect processes running as root. Therefore,
48373 + it is critical when using this option that the grsec_lock entry be
48374 + enabled after boot. Only distros with prebuilt kernel packages
48375 + with this option enabled that can ensure grsec_lock is enabled
48376 + after boot should use this option.
48377 + *Failure to set grsec_lock after boot makes all grsec features
48378 + this option covers useless*
48379 +
48380 + Currently this option creates the following sysctl entries:
48381 + "Disable Privileged I/O": "disable_priv_io"
48382 +
48383 +config GRKERNSEC_SYSCTL_ON
48384 + bool "Turn on features by default"
48385 + depends on GRKERNSEC_SYSCTL
48386 + help
48387 + If you say Y here, instead of having all features enabled in the
48388 + kernel configuration disabled at boot time, the features will be
48389 + enabled at boot time. It is recommended you say Y here unless
48390 + there is some reason you would want all sysctl-tunable features to
48391 + be disabled by default. As mentioned elsewhere, it is important
48392 + to enable the grsec_lock entry once you have finished modifying
48393 + the sysctl entries.
48394 +
48395 +endmenu
48396 +menu "Logging Options"
48397 +depends on GRKERNSEC
48398 +
48399 +config GRKERNSEC_FLOODTIME
48400 + int "Seconds in between log messages (minimum)"
48401 + default 10
48402 + help
48403 + This option allows you to enforce the number of seconds between
48404 + grsecurity log messages. The default should be suitable for most
48405 + people, however, if you choose to change it, choose a value small enough
48406 + to allow informative logs to be produced, but large enough to
48407 + prevent flooding.
48408 +
48409 +config GRKERNSEC_FLOODBURST
48410 + int "Number of messages in a burst (maximum)"
48411 + default 6
48412 + help
48413 + This option allows you to choose the maximum number of messages allowed
48414 + within the flood time interval you chose in a separate option. The
48415 + default should be suitable for most people, however if you find that
48416 + many of your logs are being interpreted as flooding, you may want to
48417 + raise this value.
48418 +
48419 +endmenu
48420 +
48421 +endmenu
48422 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
48423 new file mode 100644
48424 index 0000000..496e60d
48425 --- /dev/null
48426 +++ b/grsecurity/Makefile
48427 @@ -0,0 +1,40 @@
48428 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
48429 +# during 2001-2009 it has been completely redesigned by Brad Spengler
48430 +# into an RBAC system
48431 +#
48432 +# All code in this directory and various hooks inserted throughout the kernel
48433 +# are copyright Brad Spengler - Open Source Security, Inc., and released
48434 +# under the GPL v2 or higher
48435 +
48436 +ifndef CONFIG_IA64
48437 +KBUILD_CFLAGS += -Werror
48438 +endif
48439 +
48440 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
48441 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
48442 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
48443 +
48444 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
48445 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
48446 + gracl_learn.o grsec_log.o
48447 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
48448 +
48449 +ifdef CONFIG_NET
48450 +obj-y += grsec_sock.o
48451 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
48452 +endif
48453 +
48454 +ifndef CONFIG_GRKERNSEC
48455 +obj-y += grsec_disabled.o
48456 +endif
48457 +
48458 +ifdef CONFIG_GRKERNSEC_HIDESYM
48459 +extra-y := grsec_hidesym.o
48460 +$(obj)/grsec_hidesym.o:
48461 + @-chmod -f 500 /boot
48462 + @-chmod -f 500 /lib/modules
48463 + @-chmod -f 500 /lib64/modules
48464 + @-chmod -f 500 /lib32/modules
48465 + @-chmod -f 700 .
48466 + @echo ' grsec: protected kernel image paths'
48467 +endif
48468 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
48469 new file mode 100644
48470 index 0000000..2733872
48471 --- /dev/null
48472 +++ b/grsecurity/gracl.c
48473 @@ -0,0 +1,4163 @@
48474 +#include <linux/kernel.h>
48475 +#include <linux/module.h>
48476 +#include <linux/sched.h>
48477 +#include <linux/mm.h>
48478 +#include <linux/file.h>
48479 +#include <linux/fs.h>
48480 +#include <linux/namei.h>
48481 +#include <linux/mount.h>
48482 +#include <linux/tty.h>
48483 +#include <linux/proc_fs.h>
48484 +#include <linux/lglock.h>
48485 +#include <linux/slab.h>
48486 +#include <linux/vmalloc.h>
48487 +#include <linux/types.h>
48488 +#include <linux/sysctl.h>
48489 +#include <linux/netdevice.h>
48490 +#include <linux/ptrace.h>
48491 +#include <linux/gracl.h>
48492 +#include <linux/gralloc.h>
48493 +#include <linux/security.h>
48494 +#include <linux/grinternal.h>
48495 +#include <linux/pid_namespace.h>
48496 +#include <linux/fdtable.h>
48497 +#include <linux/percpu.h>
48498 +
48499 +#include <asm/uaccess.h>
48500 +#include <asm/errno.h>
48501 +#include <asm/mman.h>
48502 +
48503 +static struct acl_role_db acl_role_set;
48504 +static struct name_db name_set;
48505 +static struct inodev_db inodev_set;
48506 +
48507 +/* for keeping track of userspace pointers used for subjects, so we
48508 + can share references in the kernel as well
48509 +*/
48510 +
48511 +static struct path real_root;
48512 +
48513 +static struct acl_subj_map_db subj_map_set;
48514 +
48515 +static struct acl_role_label *default_role;
48516 +
48517 +static struct acl_role_label *role_list;
48518 +
48519 +static u16 acl_sp_role_value;
48520 +
48521 +extern char *gr_shared_page[4];
48522 +static DEFINE_MUTEX(gr_dev_mutex);
48523 +DEFINE_RWLOCK(gr_inode_lock);
48524 +
48525 +struct gr_arg *gr_usermode;
48526 +
48527 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
48528 +
48529 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
48530 +extern void gr_clear_learn_entries(void);
48531 +
48532 +#ifdef CONFIG_GRKERNSEC_RESLOG
48533 +extern void gr_log_resource(const struct task_struct *task,
48534 + const int res, const unsigned long wanted, const int gt);
48535 +#endif
48536 +
48537 +unsigned char *gr_system_salt;
48538 +unsigned char *gr_system_sum;
48539 +
48540 +static struct sprole_pw **acl_special_roles = NULL;
48541 +static __u16 num_sprole_pws = 0;
48542 +
48543 +static struct acl_role_label *kernel_role = NULL;
48544 +
48545 +static unsigned int gr_auth_attempts = 0;
48546 +static unsigned long gr_auth_expires = 0UL;
48547 +
48548 +#ifdef CONFIG_NET
48549 +extern struct vfsmount *sock_mnt;
48550 +#endif
48551 +
48552 +extern struct vfsmount *pipe_mnt;
48553 +extern struct vfsmount *shm_mnt;
48554 +#ifdef CONFIG_HUGETLBFS
48555 +extern struct vfsmount *hugetlbfs_vfsmount;
48556 +#endif
48557 +
48558 +static struct acl_object_label *fakefs_obj_rw;
48559 +static struct acl_object_label *fakefs_obj_rwx;
48560 +
48561 +extern int gr_init_uidset(void);
48562 +extern void gr_free_uidset(void);
48563 +extern void gr_remove_uid(uid_t uid);
48564 +extern int gr_find_uid(uid_t uid);
48565 +
48566 +DECLARE_BRLOCK(vfsmount_lock);
48567 +
48568 +__inline__ int
48569 +gr_acl_is_enabled(void)
48570 +{
48571 + return (gr_status & GR_READY);
48572 +}
48573 +
48574 +#ifdef CONFIG_BTRFS_FS
48575 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
48576 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
48577 +#endif
48578 +
48579 +static inline dev_t __get_dev(const struct dentry *dentry)
48580 +{
48581 +#ifdef CONFIG_BTRFS_FS
48582 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
48583 + return get_btrfs_dev_from_inode(dentry->d_inode);
48584 + else
48585 +#endif
48586 + return dentry->d_inode->i_sb->s_dev;
48587 +}
48588 +
48589 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48590 +{
48591 + return __get_dev(dentry);
48592 +}
48593 +
48594 +static char gr_task_roletype_to_char(struct task_struct *task)
48595 +{
48596 + switch (task->role->roletype &
48597 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
48598 + GR_ROLE_SPECIAL)) {
48599 + case GR_ROLE_DEFAULT:
48600 + return 'D';
48601 + case GR_ROLE_USER:
48602 + return 'U';
48603 + case GR_ROLE_GROUP:
48604 + return 'G';
48605 + case GR_ROLE_SPECIAL:
48606 + return 'S';
48607 + }
48608 +
48609 + return 'X';
48610 +}
48611 +
48612 +char gr_roletype_to_char(void)
48613 +{
48614 + return gr_task_roletype_to_char(current);
48615 +}
48616 +
48617 +__inline__ int
48618 +gr_acl_tpe_check(void)
48619 +{
48620 + if (unlikely(!(gr_status & GR_READY)))
48621 + return 0;
48622 + if (current->role->roletype & GR_ROLE_TPE)
48623 + return 1;
48624 + else
48625 + return 0;
48626 +}
48627 +
48628 +int
48629 +gr_handle_rawio(const struct inode *inode)
48630 +{
48631 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48632 + if (inode && S_ISBLK(inode->i_mode) &&
48633 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
48634 + !capable(CAP_SYS_RAWIO))
48635 + return 1;
48636 +#endif
48637 + return 0;
48638 +}
48639 +
48640 +static int
48641 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
48642 +{
48643 + if (likely(lena != lenb))
48644 + return 0;
48645 +
48646 + return !memcmp(a, b, lena);
48647 +}
48648 +
48649 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
48650 +{
48651 + *buflen -= namelen;
48652 + if (*buflen < 0)
48653 + return -ENAMETOOLONG;
48654 + *buffer -= namelen;
48655 + memcpy(*buffer, str, namelen);
48656 + return 0;
48657 +}
48658 +
48659 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
48660 +{
48661 + return prepend(buffer, buflen, name->name, name->len);
48662 +}
48663 +
48664 +static int prepend_path(const struct path *path, struct path *root,
48665 + char **buffer, int *buflen)
48666 +{
48667 + struct dentry *dentry = path->dentry;
48668 + struct vfsmount *vfsmnt = path->mnt;
48669 + bool slash = false;
48670 + int error = 0;
48671 +
48672 + while (dentry != root->dentry || vfsmnt != root->mnt) {
48673 + struct dentry * parent;
48674 +
48675 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
48676 + /* Global root? */
48677 + if (vfsmnt->mnt_parent == vfsmnt) {
48678 + goto out;
48679 + }
48680 + dentry = vfsmnt->mnt_mountpoint;
48681 + vfsmnt = vfsmnt->mnt_parent;
48682 + continue;
48683 + }
48684 + parent = dentry->d_parent;
48685 + prefetch(parent);
48686 + spin_lock(&dentry->d_lock);
48687 + error = prepend_name(buffer, buflen, &dentry->d_name);
48688 + spin_unlock(&dentry->d_lock);
48689 + if (!error)
48690 + error = prepend(buffer, buflen, "/", 1);
48691 + if (error)
48692 + break;
48693 +
48694 + slash = true;
48695 + dentry = parent;
48696 + }
48697 +
48698 +out:
48699 + if (!error && !slash)
48700 + error = prepend(buffer, buflen, "/", 1);
48701 +
48702 + return error;
48703 +}
48704 +
48705 +/* this must be called with vfsmount_lock and rename_lock held */
48706 +
48707 +static char *__our_d_path(const struct path *path, struct path *root,
48708 + char *buf, int buflen)
48709 +{
48710 + char *res = buf + buflen;
48711 + int error;
48712 +
48713 + prepend(&res, &buflen, "\0", 1);
48714 + error = prepend_path(path, root, &res, &buflen);
48715 + if (error)
48716 + return ERR_PTR(error);
48717 +
48718 + return res;
48719 +}
48720 +
48721 +static char *
48722 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
48723 +{
48724 + char *retval;
48725 +
48726 + retval = __our_d_path(path, root, buf, buflen);
48727 + if (unlikely(IS_ERR(retval)))
48728 + retval = strcpy(buf, "<path too long>");
48729 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
48730 + retval[1] = '\0';
48731 +
48732 + return retval;
48733 +}
48734 +
48735 +static char *
48736 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48737 + char *buf, int buflen)
48738 +{
48739 + struct path path;
48740 + char *res;
48741 +
48742 + path.dentry = (struct dentry *)dentry;
48743 + path.mnt = (struct vfsmount *)vfsmnt;
48744 +
48745 + /* we can use real_root.dentry, real_root.mnt, because this is only called
48746 + by the RBAC system */
48747 + res = gen_full_path(&path, &real_root, buf, buflen);
48748 +
48749 + return res;
48750 +}
48751 +
48752 +static char *
48753 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48754 + char *buf, int buflen)
48755 +{
48756 + char *res;
48757 + struct path path;
48758 + struct path root;
48759 + struct task_struct *reaper = &init_task;
48760 +
48761 + path.dentry = (struct dentry *)dentry;
48762 + path.mnt = (struct vfsmount *)vfsmnt;
48763 +
48764 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
48765 + get_fs_root(reaper->fs, &root);
48766 +
48767 + write_seqlock(&rename_lock);
48768 + br_read_lock(vfsmount_lock);
48769 + res = gen_full_path(&path, &root, buf, buflen);
48770 + br_read_unlock(vfsmount_lock);
48771 + write_sequnlock(&rename_lock);
48772 +
48773 + path_put(&root);
48774 + return res;
48775 +}
48776 +
48777 +static char *
48778 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48779 +{
48780 + char *ret;
48781 + write_seqlock(&rename_lock);
48782 + br_read_lock(vfsmount_lock);
48783 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48784 + PAGE_SIZE);
48785 + br_read_unlock(vfsmount_lock);
48786 + write_sequnlock(&rename_lock);
48787 + return ret;
48788 +}
48789 +
48790 +static char *
48791 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48792 +{
48793 + char *ret;
48794 + char *buf;
48795 + int buflen;
48796 +
48797 + write_seqlock(&rename_lock);
48798 + br_read_lock(vfsmount_lock);
48799 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48800 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
48801 + buflen = (int)(ret - buf);
48802 + if (buflen >= 5)
48803 + prepend(&ret, &buflen, "/proc", 5);
48804 + else
48805 + ret = strcpy(buf, "<path too long>");
48806 + br_read_unlock(vfsmount_lock);
48807 + write_sequnlock(&rename_lock);
48808 + return ret;
48809 +}
48810 +
48811 +char *
48812 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
48813 +{
48814 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48815 + PAGE_SIZE);
48816 +}
48817 +
48818 +char *
48819 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
48820 +{
48821 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48822 + PAGE_SIZE);
48823 +}
48824 +
48825 +char *
48826 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
48827 +{
48828 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
48829 + PAGE_SIZE);
48830 +}
48831 +
48832 +char *
48833 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
48834 +{
48835 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
48836 + PAGE_SIZE);
48837 +}
48838 +
48839 +char *
48840 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
48841 +{
48842 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
48843 + PAGE_SIZE);
48844 +}
48845 +
48846 +__inline__ __u32
48847 +to_gr_audit(const __u32 reqmode)
48848 +{
48849 + /* masks off auditable permission flags, then shifts them to create
48850 + auditing flags, and adds the special case of append auditing if
48851 + we're requesting write */
48852 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
48853 +}
48854 +
48855 +struct acl_subject_label *
48856 +lookup_subject_map(const struct acl_subject_label *userp)
48857 +{
48858 + unsigned int index = shash(userp, subj_map_set.s_size);
48859 + struct subject_map *match;
48860 +
48861 + match = subj_map_set.s_hash[index];
48862 +
48863 + while (match && match->user != userp)
48864 + match = match->next;
48865 +
48866 + if (match != NULL)
48867 + return match->kernel;
48868 + else
48869 + return NULL;
48870 +}
48871 +
48872 +static void
48873 +insert_subj_map_entry(struct subject_map *subjmap)
48874 +{
48875 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
48876 + struct subject_map **curr;
48877 +
48878 + subjmap->prev = NULL;
48879 +
48880 + curr = &subj_map_set.s_hash[index];
48881 + if (*curr != NULL)
48882 + (*curr)->prev = subjmap;
48883 +
48884 + subjmap->next = *curr;
48885 + *curr = subjmap;
48886 +
48887 + return;
48888 +}
48889 +
48890 +static struct acl_role_label *
48891 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
48892 + const gid_t gid)
48893 +{
48894 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
48895 + struct acl_role_label *match;
48896 + struct role_allowed_ip *ipp;
48897 + unsigned int x;
48898 + u32 curr_ip = task->signal->curr_ip;
48899 +
48900 + task->signal->saved_ip = curr_ip;
48901 +
48902 + match = acl_role_set.r_hash[index];
48903 +
48904 + while (match) {
48905 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
48906 + for (x = 0; x < match->domain_child_num; x++) {
48907 + if (match->domain_children[x] == uid)
48908 + goto found;
48909 + }
48910 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
48911 + break;
48912 + match = match->next;
48913 + }
48914 +found:
48915 + if (match == NULL) {
48916 + try_group:
48917 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
48918 + match = acl_role_set.r_hash[index];
48919 +
48920 + while (match) {
48921 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
48922 + for (x = 0; x < match->domain_child_num; x++) {
48923 + if (match->domain_children[x] == gid)
48924 + goto found2;
48925 + }
48926 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
48927 + break;
48928 + match = match->next;
48929 + }
48930 +found2:
48931 + if (match == NULL)
48932 + match = default_role;
48933 + if (match->allowed_ips == NULL)
48934 + return match;
48935 + else {
48936 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48937 + if (likely
48938 + ((ntohl(curr_ip) & ipp->netmask) ==
48939 + (ntohl(ipp->addr) & ipp->netmask)))
48940 + return match;
48941 + }
48942 + match = default_role;
48943 + }
48944 + } else if (match->allowed_ips == NULL) {
48945 + return match;
48946 + } else {
48947 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48948 + if (likely
48949 + ((ntohl(curr_ip) & ipp->netmask) ==
48950 + (ntohl(ipp->addr) & ipp->netmask)))
48951 + return match;
48952 + }
48953 + goto try_group;
48954 + }
48955 +
48956 + return match;
48957 +}
48958 +
48959 +struct acl_subject_label *
48960 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
48961 + const struct acl_role_label *role)
48962 +{
48963 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
48964 + struct acl_subject_label *match;
48965 +
48966 + match = role->subj_hash[index];
48967 +
48968 + while (match && (match->inode != ino || match->device != dev ||
48969 + (match->mode & GR_DELETED))) {
48970 + match = match->next;
48971 + }
48972 +
48973 + if (match && !(match->mode & GR_DELETED))
48974 + return match;
48975 + else
48976 + return NULL;
48977 +}
48978 +
48979 +struct acl_subject_label *
48980 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
48981 + const struct acl_role_label *role)
48982 +{
48983 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
48984 + struct acl_subject_label *match;
48985 +
48986 + match = role->subj_hash[index];
48987 +
48988 + while (match && (match->inode != ino || match->device != dev ||
48989 + !(match->mode & GR_DELETED))) {
48990 + match = match->next;
48991 + }
48992 +
48993 + if (match && (match->mode & GR_DELETED))
48994 + return match;
48995 + else
48996 + return NULL;
48997 +}
48998 +
48999 +static struct acl_object_label *
49000 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
49001 + const struct acl_subject_label *subj)
49002 +{
49003 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
49004 + struct acl_object_label *match;
49005 +
49006 + match = subj->obj_hash[index];
49007 +
49008 + while (match && (match->inode != ino || match->device != dev ||
49009 + (match->mode & GR_DELETED))) {
49010 + match = match->next;
49011 + }
49012 +
49013 + if (match && !(match->mode & GR_DELETED))
49014 + return match;
49015 + else
49016 + return NULL;
49017 +}
49018 +
49019 +static struct acl_object_label *
49020 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
49021 + const struct acl_subject_label *subj)
49022 +{
49023 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
49024 + struct acl_object_label *match;
49025 +
49026 + match = subj->obj_hash[index];
49027 +
49028 + while (match && (match->inode != ino || match->device != dev ||
49029 + !(match->mode & GR_DELETED))) {
49030 + match = match->next;
49031 + }
49032 +
49033 + if (match && (match->mode & GR_DELETED))
49034 + return match;
49035 +
49036 + match = subj->obj_hash[index];
49037 +
49038 + while (match && (match->inode != ino || match->device != dev ||
49039 + (match->mode & GR_DELETED))) {
49040 + match = match->next;
49041 + }
49042 +
49043 + if (match && !(match->mode & GR_DELETED))
49044 + return match;
49045 + else
49046 + return NULL;
49047 +}
49048 +
49049 +static struct name_entry *
49050 +lookup_name_entry(const char *name)
49051 +{
49052 + unsigned int len = strlen(name);
49053 + unsigned int key = full_name_hash(name, len);
49054 + unsigned int index = key % name_set.n_size;
49055 + struct name_entry *match;
49056 +
49057 + match = name_set.n_hash[index];
49058 +
49059 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
49060 + match = match->next;
49061 +
49062 + return match;
49063 +}
49064 +
49065 +static struct name_entry *
49066 +lookup_name_entry_create(const char *name)
49067 +{
49068 + unsigned int len = strlen(name);
49069 + unsigned int key = full_name_hash(name, len);
49070 + unsigned int index = key % name_set.n_size;
49071 + struct name_entry *match;
49072 +
49073 + match = name_set.n_hash[index];
49074 +
49075 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
49076 + !match->deleted))
49077 + match = match->next;
49078 +
49079 + if (match && match->deleted)
49080 + return match;
49081 +
49082 + match = name_set.n_hash[index];
49083 +
49084 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
49085 + match->deleted))
49086 + match = match->next;
49087 +
49088 + if (match && !match->deleted)
49089 + return match;
49090 + else
49091 + return NULL;
49092 +}
49093 +
49094 +static struct inodev_entry *
49095 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
49096 +{
49097 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
49098 + struct inodev_entry *match;
49099 +
49100 + match = inodev_set.i_hash[index];
49101 +
49102 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
49103 + match = match->next;
49104 +
49105 + return match;
49106 +}
49107 +
49108 +static void
49109 +insert_inodev_entry(struct inodev_entry *entry)
49110 +{
49111 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
49112 + inodev_set.i_size);
49113 + struct inodev_entry **curr;
49114 +
49115 + entry->prev = NULL;
49116 +
49117 + curr = &inodev_set.i_hash[index];
49118 + if (*curr != NULL)
49119 + (*curr)->prev = entry;
49120 +
49121 + entry->next = *curr;
49122 + *curr = entry;
49123 +
49124 + return;
49125 +}
49126 +
49127 +static void
49128 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
49129 +{
49130 + unsigned int index =
49131 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
49132 + struct acl_role_label **curr;
49133 + struct acl_role_label *tmp;
49134 +
49135 + curr = &acl_role_set.r_hash[index];
49136 +
49137 + /* if role was already inserted due to domains and already has
49138 + a role in the same bucket as it attached, then we need to
49139 + combine these two buckets
49140 + */
49141 + if (role->next) {
49142 + tmp = role->next;
49143 + while (tmp->next)
49144 + tmp = tmp->next;
49145 + tmp->next = *curr;
49146 + } else
49147 + role->next = *curr;
49148 + *curr = role;
49149 +
49150 + return;
49151 +}
49152 +
49153 +static void
49154 +insert_acl_role_label(struct acl_role_label *role)
49155 +{
49156 + int i;
49157 +
49158 + if (role_list == NULL) {
49159 + role_list = role;
49160 + role->prev = NULL;
49161 + } else {
49162 + role->prev = role_list;
49163 + role_list = role;
49164 + }
49165 +
49166 + /* used for hash chains */
49167 + role->next = NULL;
49168 +
49169 + if (role->roletype & GR_ROLE_DOMAIN) {
49170 + for (i = 0; i < role->domain_child_num; i++)
49171 + __insert_acl_role_label(role, role->domain_children[i]);
49172 + } else
49173 + __insert_acl_role_label(role, role->uidgid);
49174 +}
49175 +
49176 +static int
49177 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
49178 +{
49179 + struct name_entry **curr, *nentry;
49180 + struct inodev_entry *ientry;
49181 + unsigned int len = strlen(name);
49182 + unsigned int key = full_name_hash(name, len);
49183 + unsigned int index = key % name_set.n_size;
49184 +
49185 + curr = &name_set.n_hash[index];
49186 +
49187 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
49188 + curr = &((*curr)->next);
49189 +
49190 + if (*curr != NULL)
49191 + return 1;
49192 +
49193 + nentry = acl_alloc(sizeof (struct name_entry));
49194 + if (nentry == NULL)
49195 + return 0;
49196 + ientry = acl_alloc(sizeof (struct inodev_entry));
49197 + if (ientry == NULL)
49198 + return 0;
49199 + ientry->nentry = nentry;
49200 +
49201 + nentry->key = key;
49202 + nentry->name = name;
49203 + nentry->inode = inode;
49204 + nentry->device = device;
49205 + nentry->len = len;
49206 + nentry->deleted = deleted;
49207 +
49208 + nentry->prev = NULL;
49209 + curr = &name_set.n_hash[index];
49210 + if (*curr != NULL)
49211 + (*curr)->prev = nentry;
49212 + nentry->next = *curr;
49213 + *curr = nentry;
49214 +
49215 + /* insert us into the table searchable by inode/dev */
49216 + insert_inodev_entry(ientry);
49217 +
49218 + return 1;
49219 +}
49220 +
49221 +static void
49222 +insert_acl_obj_label(struct acl_object_label *obj,
49223 + struct acl_subject_label *subj)
49224 +{
49225 + unsigned int index =
49226 + fhash(obj->inode, obj->device, subj->obj_hash_size);
49227 + struct acl_object_label **curr;
49228 +
49229 +
49230 + obj->prev = NULL;
49231 +
49232 + curr = &subj->obj_hash[index];
49233 + if (*curr != NULL)
49234 + (*curr)->prev = obj;
49235 +
49236 + obj->next = *curr;
49237 + *curr = obj;
49238 +
49239 + return;
49240 +}
49241 +
49242 +static void
49243 +insert_acl_subj_label(struct acl_subject_label *obj,
49244 + struct acl_role_label *role)
49245 +{
49246 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
49247 + struct acl_subject_label **curr;
49248 +
49249 + obj->prev = NULL;
49250 +
49251 + curr = &role->subj_hash[index];
49252 + if (*curr != NULL)
49253 + (*curr)->prev = obj;
49254 +
49255 + obj->next = *curr;
49256 + *curr = obj;
49257 +
49258 + return;
49259 +}
49260 +
49261 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
49262 +
49263 +static void *
49264 +create_table(__u32 * len, int elementsize)
49265 +{
49266 + unsigned int table_sizes[] = {
49267 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
49268 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
49269 + 4194301, 8388593, 16777213, 33554393, 67108859
49270 + };
49271 + void *newtable = NULL;
49272 + unsigned int pwr = 0;
49273 +
49274 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
49275 + table_sizes[pwr] <= *len)
49276 + pwr++;
49277 +
49278 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
49279 + return newtable;
49280 +
49281 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
49282 + newtable =
49283 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
49284 + else
49285 + newtable = vmalloc(table_sizes[pwr] * elementsize);
49286 +
49287 + *len = table_sizes[pwr];
49288 +
49289 + return newtable;
49290 +}
49291 +
49292 +static int
49293 +init_variables(const struct gr_arg *arg)
49294 +{
49295 + struct task_struct *reaper = &init_task;
49296 + unsigned int stacksize;
49297 +
49298 + subj_map_set.s_size = arg->role_db.num_subjects;
49299 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
49300 + name_set.n_size = arg->role_db.num_objects;
49301 + inodev_set.i_size = arg->role_db.num_objects;
49302 +
49303 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
49304 + !name_set.n_size || !inodev_set.i_size)
49305 + return 1;
49306 +
49307 + if (!gr_init_uidset())
49308 + return 1;
49309 +
49310 + /* set up the stack that holds allocation info */
49311 +
49312 + stacksize = arg->role_db.num_pointers + 5;
49313 +
49314 + if (!acl_alloc_stack_init(stacksize))
49315 + return 1;
49316 +
49317 + /* grab reference for the real root dentry and vfsmount */
49318 + get_fs_root(reaper->fs, &real_root);
49319 +
49320 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49321 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
49322 +#endif
49323 +
49324 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
49325 + if (fakefs_obj_rw == NULL)
49326 + return 1;
49327 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
49328 +
49329 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
49330 + if (fakefs_obj_rwx == NULL)
49331 + return 1;
49332 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
49333 +
49334 + subj_map_set.s_hash =
49335 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
49336 + acl_role_set.r_hash =
49337 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
49338 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
49339 + inodev_set.i_hash =
49340 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
49341 +
49342 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
49343 + !name_set.n_hash || !inodev_set.i_hash)
49344 + return 1;
49345 +
49346 + memset(subj_map_set.s_hash, 0,
49347 + sizeof(struct subject_map *) * subj_map_set.s_size);
49348 + memset(acl_role_set.r_hash, 0,
49349 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
49350 + memset(name_set.n_hash, 0,
49351 + sizeof (struct name_entry *) * name_set.n_size);
49352 + memset(inodev_set.i_hash, 0,
49353 + sizeof (struct inodev_entry *) * inodev_set.i_size);
49354 +
49355 + return 0;
49356 +}
49357 +
49358 +/* free information not needed after startup
49359 + currently contains user->kernel pointer mappings for subjects
49360 +*/
49361 +
49362 +static void
49363 +free_init_variables(void)
49364 +{
49365 + __u32 i;
49366 +
49367 + if (subj_map_set.s_hash) {
49368 + for (i = 0; i < subj_map_set.s_size; i++) {
49369 + if (subj_map_set.s_hash[i]) {
49370 + kfree(subj_map_set.s_hash[i]);
49371 + subj_map_set.s_hash[i] = NULL;
49372 + }
49373 + }
49374 +
49375 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
49376 + PAGE_SIZE)
49377 + kfree(subj_map_set.s_hash);
49378 + else
49379 + vfree(subj_map_set.s_hash);
49380 + }
49381 +
49382 + return;
49383 +}
49384 +
49385 +static void
49386 +free_variables(void)
49387 +{
49388 + struct acl_subject_label *s;
49389 + struct acl_role_label *r;
49390 + struct task_struct *task, *task2;
49391 + unsigned int x;
49392 +
49393 + gr_clear_learn_entries();
49394 +
49395 + read_lock(&tasklist_lock);
49396 + do_each_thread(task2, task) {
49397 + task->acl_sp_role = 0;
49398 + task->acl_role_id = 0;
49399 + task->acl = NULL;
49400 + task->role = NULL;
49401 + } while_each_thread(task2, task);
49402 + read_unlock(&tasklist_lock);
49403 +
49404 + /* release the reference to the real root dentry and vfsmount */
49405 + path_put(&real_root);
49406 +
49407 + /* free all object hash tables */
49408 +
49409 + FOR_EACH_ROLE_START(r)
49410 + if (r->subj_hash == NULL)
49411 + goto next_role;
49412 + FOR_EACH_SUBJECT_START(r, s, x)
49413 + if (s->obj_hash == NULL)
49414 + break;
49415 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49416 + kfree(s->obj_hash);
49417 + else
49418 + vfree(s->obj_hash);
49419 + FOR_EACH_SUBJECT_END(s, x)
49420 + FOR_EACH_NESTED_SUBJECT_START(r, s)
49421 + if (s->obj_hash == NULL)
49422 + break;
49423 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49424 + kfree(s->obj_hash);
49425 + else
49426 + vfree(s->obj_hash);
49427 + FOR_EACH_NESTED_SUBJECT_END(s)
49428 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
49429 + kfree(r->subj_hash);
49430 + else
49431 + vfree(r->subj_hash);
49432 + r->subj_hash = NULL;
49433 +next_role:
49434 + FOR_EACH_ROLE_END(r)
49435 +
49436 + acl_free_all();
49437 +
49438 + if (acl_role_set.r_hash) {
49439 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
49440 + PAGE_SIZE)
49441 + kfree(acl_role_set.r_hash);
49442 + else
49443 + vfree(acl_role_set.r_hash);
49444 + }
49445 + if (name_set.n_hash) {
49446 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
49447 + PAGE_SIZE)
49448 + kfree(name_set.n_hash);
49449 + else
49450 + vfree(name_set.n_hash);
49451 + }
49452 +
49453 + if (inodev_set.i_hash) {
49454 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
49455 + PAGE_SIZE)
49456 + kfree(inodev_set.i_hash);
49457 + else
49458 + vfree(inodev_set.i_hash);
49459 + }
49460 +
49461 + gr_free_uidset();
49462 +
49463 + memset(&name_set, 0, sizeof (struct name_db));
49464 + memset(&inodev_set, 0, sizeof (struct inodev_db));
49465 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
49466 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
49467 +
49468 + default_role = NULL;
49469 + role_list = NULL;
49470 +
49471 + return;
49472 +}
49473 +
49474 +static __u32
49475 +count_user_objs(struct acl_object_label *userp)
49476 +{
49477 + struct acl_object_label o_tmp;
49478 + __u32 num = 0;
49479 +
49480 + while (userp) {
49481 + if (copy_from_user(&o_tmp, userp,
49482 + sizeof (struct acl_object_label)))
49483 + break;
49484 +
49485 + userp = o_tmp.prev;
49486 + num++;
49487 + }
49488 +
49489 + return num;
49490 +}
49491 +
49492 +static struct acl_subject_label *
49493 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
49494 +
49495 +static int
49496 +copy_user_glob(struct acl_object_label *obj)
49497 +{
49498 + struct acl_object_label *g_tmp, **guser;
49499 + unsigned int len;
49500 + char *tmp;
49501 +
49502 + if (obj->globbed == NULL)
49503 + return 0;
49504 +
49505 + guser = &obj->globbed;
49506 + while (*guser) {
49507 + g_tmp = (struct acl_object_label *)
49508 + acl_alloc(sizeof (struct acl_object_label));
49509 + if (g_tmp == NULL)
49510 + return -ENOMEM;
49511 +
49512 + if (copy_from_user(g_tmp, *guser,
49513 + sizeof (struct acl_object_label)))
49514 + return -EFAULT;
49515 +
49516 + len = strnlen_user(g_tmp->filename, PATH_MAX);
49517 +
49518 + if (!len || len >= PATH_MAX)
49519 + return -EINVAL;
49520 +
49521 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49522 + return -ENOMEM;
49523 +
49524 + if (copy_from_user(tmp, g_tmp->filename, len))
49525 + return -EFAULT;
49526 + tmp[len-1] = '\0';
49527 + g_tmp->filename = tmp;
49528 +
49529 + *guser = g_tmp;
49530 + guser = &(g_tmp->next);
49531 + }
49532 +
49533 + return 0;
49534 +}
49535 +
49536 +static int
49537 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
49538 + struct acl_role_label *role)
49539 +{
49540 + struct acl_object_label *o_tmp;
49541 + unsigned int len;
49542 + int ret;
49543 + char *tmp;
49544 +
49545 + while (userp) {
49546 + if ((o_tmp = (struct acl_object_label *)
49547 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
49548 + return -ENOMEM;
49549 +
49550 + if (copy_from_user(o_tmp, userp,
49551 + sizeof (struct acl_object_label)))
49552 + return -EFAULT;
49553 +
49554 + userp = o_tmp->prev;
49555 +
49556 + len = strnlen_user(o_tmp->filename, PATH_MAX);
49557 +
49558 + if (!len || len >= PATH_MAX)
49559 + return -EINVAL;
49560 +
49561 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49562 + return -ENOMEM;
49563 +
49564 + if (copy_from_user(tmp, o_tmp->filename, len))
49565 + return -EFAULT;
49566 + tmp[len-1] = '\0';
49567 + o_tmp->filename = tmp;
49568 +
49569 + insert_acl_obj_label(o_tmp, subj);
49570 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
49571 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
49572 + return -ENOMEM;
49573 +
49574 + ret = copy_user_glob(o_tmp);
49575 + if (ret)
49576 + return ret;
49577 +
49578 + if (o_tmp->nested) {
49579 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
49580 + if (IS_ERR(o_tmp->nested))
49581 + return PTR_ERR(o_tmp->nested);
49582 +
49583 + /* insert into nested subject list */
49584 + o_tmp->nested->next = role->hash->first;
49585 + role->hash->first = o_tmp->nested;
49586 + }
49587 + }
49588 +
49589 + return 0;
49590 +}
49591 +
49592 +static __u32
49593 +count_user_subjs(struct acl_subject_label *userp)
49594 +{
49595 + struct acl_subject_label s_tmp;
49596 + __u32 num = 0;
49597 +
49598 + while (userp) {
49599 + if (copy_from_user(&s_tmp, userp,
49600 + sizeof (struct acl_subject_label)))
49601 + break;
49602 +
49603 + userp = s_tmp.prev;
49604 + /* do not count nested subjects against this count, since
49605 + they are not included in the hash table, but are
49606 + attached to objects. We have already counted
49607 + the subjects in userspace for the allocation
49608 + stack
49609 + */
49610 + if (!(s_tmp.mode & GR_NESTED))
49611 + num++;
49612 + }
49613 +
49614 + return num;
49615 +}
49616 +
49617 +static int
49618 +copy_user_allowedips(struct acl_role_label *rolep)
49619 +{
49620 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
49621 +
49622 + ruserip = rolep->allowed_ips;
49623 +
49624 + while (ruserip) {
49625 + rlast = rtmp;
49626 +
49627 + if ((rtmp = (struct role_allowed_ip *)
49628 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
49629 + return -ENOMEM;
49630 +
49631 + if (copy_from_user(rtmp, ruserip,
49632 + sizeof (struct role_allowed_ip)))
49633 + return -EFAULT;
49634 +
49635 + ruserip = rtmp->prev;
49636 +
49637 + if (!rlast) {
49638 + rtmp->prev = NULL;
49639 + rolep->allowed_ips = rtmp;
49640 + } else {
49641 + rlast->next = rtmp;
49642 + rtmp->prev = rlast;
49643 + }
49644 +
49645 + if (!ruserip)
49646 + rtmp->next = NULL;
49647 + }
49648 +
49649 + return 0;
49650 +}
49651 +
49652 +static int
49653 +copy_user_transitions(struct acl_role_label *rolep)
49654 +{
49655 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
49656 +
49657 + unsigned int len;
49658 + char *tmp;
49659 +
49660 + rusertp = rolep->transitions;
49661 +
49662 + while (rusertp) {
49663 + rlast = rtmp;
49664 +
49665 + if ((rtmp = (struct role_transition *)
49666 + acl_alloc(sizeof (struct role_transition))) == NULL)
49667 + return -ENOMEM;
49668 +
49669 + if (copy_from_user(rtmp, rusertp,
49670 + sizeof (struct role_transition)))
49671 + return -EFAULT;
49672 +
49673 + rusertp = rtmp->prev;
49674 +
49675 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
49676 +
49677 + if (!len || len >= GR_SPROLE_LEN)
49678 + return -EINVAL;
49679 +
49680 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49681 + return -ENOMEM;
49682 +
49683 + if (copy_from_user(tmp, rtmp->rolename, len))
49684 + return -EFAULT;
49685 + tmp[len-1] = '\0';
49686 + rtmp->rolename = tmp;
49687 +
49688 + if (!rlast) {
49689 + rtmp->prev = NULL;
49690 + rolep->transitions = rtmp;
49691 + } else {
49692 + rlast->next = rtmp;
49693 + rtmp->prev = rlast;
49694 + }
49695 +
49696 + if (!rusertp)
49697 + rtmp->next = NULL;
49698 + }
49699 +
49700 + return 0;
49701 +}
49702 +
49703 +static struct acl_subject_label *
49704 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
49705 +{
49706 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
49707 + unsigned int len;
49708 + char *tmp;
49709 + __u32 num_objs;
49710 + struct acl_ip_label **i_tmp, *i_utmp2;
49711 + struct gr_hash_struct ghash;
49712 + struct subject_map *subjmap;
49713 + unsigned int i_num;
49714 + int err;
49715 +
49716 + s_tmp = lookup_subject_map(userp);
49717 +
49718 + /* we've already copied this subject into the kernel, just return
49719 + the reference to it, and don't copy it over again
49720 + */
49721 + if (s_tmp)
49722 + return(s_tmp);
49723 +
49724 + if ((s_tmp = (struct acl_subject_label *)
49725 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
49726 + return ERR_PTR(-ENOMEM);
49727 +
49728 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
49729 + if (subjmap == NULL)
49730 + return ERR_PTR(-ENOMEM);
49731 +
49732 + subjmap->user = userp;
49733 + subjmap->kernel = s_tmp;
49734 + insert_subj_map_entry(subjmap);
49735 +
49736 + if (copy_from_user(s_tmp, userp,
49737 + sizeof (struct acl_subject_label)))
49738 + return ERR_PTR(-EFAULT);
49739 +
49740 + len = strnlen_user(s_tmp->filename, PATH_MAX);
49741 +
49742 + if (!len || len >= PATH_MAX)
49743 + return ERR_PTR(-EINVAL);
49744 +
49745 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49746 + return ERR_PTR(-ENOMEM);
49747 +
49748 + if (copy_from_user(tmp, s_tmp->filename, len))
49749 + return ERR_PTR(-EFAULT);
49750 + tmp[len-1] = '\0';
49751 + s_tmp->filename = tmp;
49752 +
49753 + if (!strcmp(s_tmp->filename, "/"))
49754 + role->root_label = s_tmp;
49755 +
49756 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
49757 + return ERR_PTR(-EFAULT);
49758 +
49759 + /* copy user and group transition tables */
49760 +
49761 + if (s_tmp->user_trans_num) {
49762 + uid_t *uidlist;
49763 +
49764 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
49765 + if (uidlist == NULL)
49766 + return ERR_PTR(-ENOMEM);
49767 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
49768 + return ERR_PTR(-EFAULT);
49769 +
49770 + s_tmp->user_transitions = uidlist;
49771 + }
49772 +
49773 + if (s_tmp->group_trans_num) {
49774 + gid_t *gidlist;
49775 +
49776 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
49777 + if (gidlist == NULL)
49778 + return ERR_PTR(-ENOMEM);
49779 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
49780 + return ERR_PTR(-EFAULT);
49781 +
49782 + s_tmp->group_transitions = gidlist;
49783 + }
49784 +
49785 + /* set up object hash table */
49786 + num_objs = count_user_objs(ghash.first);
49787 +
49788 + s_tmp->obj_hash_size = num_objs;
49789 + s_tmp->obj_hash =
49790 + (struct acl_object_label **)
49791 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
49792 +
49793 + if (!s_tmp->obj_hash)
49794 + return ERR_PTR(-ENOMEM);
49795 +
49796 + memset(s_tmp->obj_hash, 0,
49797 + s_tmp->obj_hash_size *
49798 + sizeof (struct acl_object_label *));
49799 +
49800 + /* add in objects */
49801 + err = copy_user_objs(ghash.first, s_tmp, role);
49802 +
49803 + if (err)
49804 + return ERR_PTR(err);
49805 +
49806 + /* set pointer for parent subject */
49807 + if (s_tmp->parent_subject) {
49808 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
49809 +
49810 + if (IS_ERR(s_tmp2))
49811 + return s_tmp2;
49812 +
49813 + s_tmp->parent_subject = s_tmp2;
49814 + }
49815 +
49816 + /* add in ip acls */
49817 +
49818 + if (!s_tmp->ip_num) {
49819 + s_tmp->ips = NULL;
49820 + goto insert;
49821 + }
49822 +
49823 + i_tmp =
49824 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
49825 + sizeof (struct acl_ip_label *));
49826 +
49827 + if (!i_tmp)
49828 + return ERR_PTR(-ENOMEM);
49829 +
49830 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
49831 + *(i_tmp + i_num) =
49832 + (struct acl_ip_label *)
49833 + acl_alloc(sizeof (struct acl_ip_label));
49834 + if (!*(i_tmp + i_num))
49835 + return ERR_PTR(-ENOMEM);
49836 +
49837 + if (copy_from_user
49838 + (&i_utmp2, s_tmp->ips + i_num,
49839 + sizeof (struct acl_ip_label *)))
49840 + return ERR_PTR(-EFAULT);
49841 +
49842 + if (copy_from_user
49843 + (*(i_tmp + i_num), i_utmp2,
49844 + sizeof (struct acl_ip_label)))
49845 + return ERR_PTR(-EFAULT);
49846 +
49847 + if ((*(i_tmp + i_num))->iface == NULL)
49848 + continue;
49849 +
49850 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
49851 + if (!len || len >= IFNAMSIZ)
49852 + return ERR_PTR(-EINVAL);
49853 + tmp = acl_alloc(len);
49854 + if (tmp == NULL)
49855 + return ERR_PTR(-ENOMEM);
49856 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
49857 + return ERR_PTR(-EFAULT);
49858 + (*(i_tmp + i_num))->iface = tmp;
49859 + }
49860 +
49861 + s_tmp->ips = i_tmp;
49862 +
49863 +insert:
49864 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
49865 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
49866 + return ERR_PTR(-ENOMEM);
49867 +
49868 + return s_tmp;
49869 +}
49870 +
49871 +static int
49872 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
49873 +{
49874 + struct acl_subject_label s_pre;
49875 + struct acl_subject_label * ret;
49876 + int err;
49877 +
49878 + while (userp) {
49879 + if (copy_from_user(&s_pre, userp,
49880 + sizeof (struct acl_subject_label)))
49881 + return -EFAULT;
49882 +
49883 + /* do not add nested subjects here, add
49884 + while parsing objects
49885 + */
49886 +
49887 + if (s_pre.mode & GR_NESTED) {
49888 + userp = s_pre.prev;
49889 + continue;
49890 + }
49891 +
49892 + ret = do_copy_user_subj(userp, role);
49893 +
49894 + err = PTR_ERR(ret);
49895 + if (IS_ERR(ret))
49896 + return err;
49897 +
49898 + insert_acl_subj_label(ret, role);
49899 +
49900 + userp = s_pre.prev;
49901 + }
49902 +
49903 + return 0;
49904 +}
49905 +
49906 +static int
49907 +copy_user_acl(struct gr_arg *arg)
49908 +{
49909 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
49910 + struct sprole_pw *sptmp;
49911 + struct gr_hash_struct *ghash;
49912 + uid_t *domainlist;
49913 + unsigned int r_num;
49914 + unsigned int len;
49915 + char *tmp;
49916 + int err = 0;
49917 + __u16 i;
49918 + __u32 num_subjs;
49919 +
49920 + /* we need a default and kernel role */
49921 + if (arg->role_db.num_roles < 2)
49922 + return -EINVAL;
49923 +
49924 + /* copy special role authentication info from userspace */
49925 +
49926 + num_sprole_pws = arg->num_sprole_pws;
49927 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
49928 +
49929 + if (!acl_special_roles) {
49930 + err = -ENOMEM;
49931 + goto cleanup;
49932 + }
49933 +
49934 + for (i = 0; i < num_sprole_pws; i++) {
49935 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
49936 + if (!sptmp) {
49937 + err = -ENOMEM;
49938 + goto cleanup;
49939 + }
49940 + if (copy_from_user(sptmp, arg->sprole_pws + i,
49941 + sizeof (struct sprole_pw))) {
49942 + err = -EFAULT;
49943 + goto cleanup;
49944 + }
49945 +
49946 + len =
49947 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
49948 +
49949 + if (!len || len >= GR_SPROLE_LEN) {
49950 + err = -EINVAL;
49951 + goto cleanup;
49952 + }
49953 +
49954 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
49955 + err = -ENOMEM;
49956 + goto cleanup;
49957 + }
49958 +
49959 + if (copy_from_user(tmp, sptmp->rolename, len)) {
49960 + err = -EFAULT;
49961 + goto cleanup;
49962 + }
49963 + tmp[len-1] = '\0';
49964 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49965 + printk(KERN_ALERT "Copying special role %s\n", tmp);
49966 +#endif
49967 + sptmp->rolename = tmp;
49968 + acl_special_roles[i] = sptmp;
49969 + }
49970 +
49971 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
49972 +
49973 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
49974 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
49975 +
49976 + if (!r_tmp) {
49977 + err = -ENOMEM;
49978 + goto cleanup;
49979 + }
49980 +
49981 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
49982 + sizeof (struct acl_role_label *))) {
49983 + err = -EFAULT;
49984 + goto cleanup;
49985 + }
49986 +
49987 + if (copy_from_user(r_tmp, r_utmp2,
49988 + sizeof (struct acl_role_label))) {
49989 + err = -EFAULT;
49990 + goto cleanup;
49991 + }
49992 +
49993 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
49994 +
49995 + if (!len || len >= PATH_MAX) {
49996 + err = -EINVAL;
49997 + goto cleanup;
49998 + }
49999 +
50000 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
50001 + err = -ENOMEM;
50002 + goto cleanup;
50003 + }
50004 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
50005 + err = -EFAULT;
50006 + goto cleanup;
50007 + }
50008 + tmp[len-1] = '\0';
50009 + r_tmp->rolename = tmp;
50010 +
50011 + if (!strcmp(r_tmp->rolename, "default")
50012 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
50013 + default_role = r_tmp;
50014 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
50015 + kernel_role = r_tmp;
50016 + }
50017 +
50018 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
50019 + err = -ENOMEM;
50020 + goto cleanup;
50021 + }
50022 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
50023 + err = -EFAULT;
50024 + goto cleanup;
50025 + }
50026 +
50027 + r_tmp->hash = ghash;
50028 +
50029 + num_subjs = count_user_subjs(r_tmp->hash->first);
50030 +
50031 + r_tmp->subj_hash_size = num_subjs;
50032 + r_tmp->subj_hash =
50033 + (struct acl_subject_label **)
50034 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
50035 +
50036 + if (!r_tmp->subj_hash) {
50037 + err = -ENOMEM;
50038 + goto cleanup;
50039 + }
50040 +
50041 + err = copy_user_allowedips(r_tmp);
50042 + if (err)
50043 + goto cleanup;
50044 +
50045 + /* copy domain info */
50046 + if (r_tmp->domain_children != NULL) {
50047 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
50048 + if (domainlist == NULL) {
50049 + err = -ENOMEM;
50050 + goto cleanup;
50051 + }
50052 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
50053 + err = -EFAULT;
50054 + goto cleanup;
50055 + }
50056 + r_tmp->domain_children = domainlist;
50057 + }
50058 +
50059 + err = copy_user_transitions(r_tmp);
50060 + if (err)
50061 + goto cleanup;
50062 +
50063 + memset(r_tmp->subj_hash, 0,
50064 + r_tmp->subj_hash_size *
50065 + sizeof (struct acl_subject_label *));
50066 +
50067 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
50068 +
50069 + if (err)
50070 + goto cleanup;
50071 +
50072 + /* set nested subject list to null */
50073 + r_tmp->hash->first = NULL;
50074 +
50075 + insert_acl_role_label(r_tmp);
50076 + }
50077 +
50078 + goto return_err;
50079 + cleanup:
50080 + free_variables();
50081 + return_err:
50082 + return err;
50083 +
50084 +}
50085 +
50086 +static int
50087 +gracl_init(struct gr_arg *args)
50088 +{
50089 + int error = 0;
50090 +
50091 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
50092 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
50093 +
50094 + if (init_variables(args)) {
50095 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
50096 + error = -ENOMEM;
50097 + free_variables();
50098 + goto out;
50099 + }
50100 +
50101 + error = copy_user_acl(args);
50102 + free_init_variables();
50103 + if (error) {
50104 + free_variables();
50105 + goto out;
50106 + }
50107 +
50108 + if ((error = gr_set_acls(0))) {
50109 + free_variables();
50110 + goto out;
50111 + }
50112 +
50113 + pax_open_kernel();
50114 + gr_status |= GR_READY;
50115 + pax_close_kernel();
50116 +
50117 + out:
50118 + return error;
50119 +}
50120 +
50121 +/* derived from glibc fnmatch() 0: match, 1: no match*/
50122 +
50123 +static int
50124 +glob_match(const char *p, const char *n)
50125 +{
50126 + char c;
50127 +
50128 + while ((c = *p++) != '\0') {
50129 + switch (c) {
50130 + case '?':
50131 + if (*n == '\0')
50132 + return 1;
50133 + else if (*n == '/')
50134 + return 1;
50135 + break;
50136 + case '\\':
50137 + if (*n != c)
50138 + return 1;
50139 + break;
50140 + case '*':
50141 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
50142 + if (*n == '/')
50143 + return 1;
50144 + else if (c == '?') {
50145 + if (*n == '\0')
50146 + return 1;
50147 + else
50148 + ++n;
50149 + }
50150 + }
50151 + if (c == '\0') {
50152 + return 0;
50153 + } else {
50154 + const char *endp;
50155 +
50156 + if ((endp = strchr(n, '/')) == NULL)
50157 + endp = n + strlen(n);
50158 +
50159 + if (c == '[') {
50160 + for (--p; n < endp; ++n)
50161 + if (!glob_match(p, n))
50162 + return 0;
50163 + } else if (c == '/') {
50164 + while (*n != '\0' && *n != '/')
50165 + ++n;
50166 + if (*n == '/' && !glob_match(p, n + 1))
50167 + return 0;
50168 + } else {
50169 + for (--p; n < endp; ++n)
50170 + if (*n == c && !glob_match(p, n))
50171 + return 0;
50172 + }
50173 +
50174 + return 1;
50175 + }
50176 + case '[':
50177 + {
50178 + int not;
50179 + char cold;
50180 +
50181 + if (*n == '\0' || *n == '/')
50182 + return 1;
50183 +
50184 + not = (*p == '!' || *p == '^');
50185 + if (not)
50186 + ++p;
50187 +
50188 + c = *p++;
50189 + for (;;) {
50190 + unsigned char fn = (unsigned char)*n;
50191 +
50192 + if (c == '\0')
50193 + return 1;
50194 + else {
50195 + if (c == fn)
50196 + goto matched;
50197 + cold = c;
50198 + c = *p++;
50199 +
50200 + if (c == '-' && *p != ']') {
50201 + unsigned char cend = *p++;
50202 +
50203 + if (cend == '\0')
50204 + return 1;
50205 +
50206 + if (cold <= fn && fn <= cend)
50207 + goto matched;
50208 +
50209 + c = *p++;
50210 + }
50211 + }
50212 +
50213 + if (c == ']')
50214 + break;
50215 + }
50216 + if (!not)
50217 + return 1;
50218 + break;
50219 + matched:
50220 + while (c != ']') {
50221 + if (c == '\0')
50222 + return 1;
50223 +
50224 + c = *p++;
50225 + }
50226 + if (not)
50227 + return 1;
50228 + }
50229 + break;
50230 + default:
50231 + if (c != *n)
50232 + return 1;
50233 + }
50234 +
50235 + ++n;
50236 + }
50237 +
50238 + if (*n == '\0')
50239 + return 0;
50240 +
50241 + if (*n == '/')
50242 + return 0;
50243 +
50244 + return 1;
50245 +}
50246 +
50247 +static struct acl_object_label *
50248 +chk_glob_label(struct acl_object_label *globbed,
50249 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
50250 +{
50251 + struct acl_object_label *tmp;
50252 +
50253 + if (*path == NULL)
50254 + *path = gr_to_filename_nolock(dentry, mnt);
50255 +
50256 + tmp = globbed;
50257 +
50258 + while (tmp) {
50259 + if (!glob_match(tmp->filename, *path))
50260 + return tmp;
50261 + tmp = tmp->next;
50262 + }
50263 +
50264 + return NULL;
50265 +}
50266 +
50267 +static struct acl_object_label *
50268 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50269 + const ino_t curr_ino, const dev_t curr_dev,
50270 + const struct acl_subject_label *subj, char **path, const int checkglob)
50271 +{
50272 + struct acl_subject_label *tmpsubj;
50273 + struct acl_object_label *retval;
50274 + struct acl_object_label *retval2;
50275 +
50276 + tmpsubj = (struct acl_subject_label *) subj;
50277 + read_lock(&gr_inode_lock);
50278 + do {
50279 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
50280 + if (retval) {
50281 + if (checkglob && retval->globbed) {
50282 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
50283 + if (retval2)
50284 + retval = retval2;
50285 + }
50286 + break;
50287 + }
50288 + } while ((tmpsubj = tmpsubj->parent_subject));
50289 + read_unlock(&gr_inode_lock);
50290 +
50291 + return retval;
50292 +}
50293 +
50294 +static __inline__ struct acl_object_label *
50295 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50296 + struct dentry *curr_dentry,
50297 + const struct acl_subject_label *subj, char **path, const int checkglob)
50298 +{
50299 + int newglob = checkglob;
50300 + ino_t inode;
50301 + dev_t device;
50302 +
50303 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
50304 + as we don't want a / * rule to match instead of the / object
50305 + don't do this for create lookups that call this function though, since they're looking up
50306 + on the parent and thus need globbing checks on all paths
50307 + */
50308 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
50309 + newglob = GR_NO_GLOB;
50310 +
50311 + spin_lock(&curr_dentry->d_lock);
50312 + inode = curr_dentry->d_inode->i_ino;
50313 + device = __get_dev(curr_dentry);
50314 + spin_unlock(&curr_dentry->d_lock);
50315 +
50316 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
50317 +}
50318 +
50319 +static struct acl_object_label *
50320 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50321 + const struct acl_subject_label *subj, char *path, const int checkglob)
50322 +{
50323 + struct dentry *dentry = (struct dentry *) l_dentry;
50324 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50325 + struct acl_object_label *retval;
50326 + struct dentry *parent;
50327 +
50328 + write_seqlock(&rename_lock);
50329 + br_read_lock(vfsmount_lock);
50330 +
50331 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
50332 +#ifdef CONFIG_NET
50333 + mnt == sock_mnt ||
50334 +#endif
50335 +#ifdef CONFIG_HUGETLBFS
50336 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
50337 +#endif
50338 + /* ignore Eric Biederman */
50339 + IS_PRIVATE(l_dentry->d_inode))) {
50340 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
50341 + goto out;
50342 + }
50343 +
50344 + for (;;) {
50345 + if (dentry == real_root.dentry && mnt == real_root.mnt)
50346 + break;
50347 +
50348 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50349 + if (mnt->mnt_parent == mnt)
50350 + break;
50351 +
50352 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50353 + if (retval != NULL)
50354 + goto out;
50355 +
50356 + dentry = mnt->mnt_mountpoint;
50357 + mnt = mnt->mnt_parent;
50358 + continue;
50359 + }
50360 +
50361 + parent = dentry->d_parent;
50362 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50363 + if (retval != NULL)
50364 + goto out;
50365 +
50366 + dentry = parent;
50367 + }
50368 +
50369 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50370 +
50371 + /* real_root is pinned so we don't have to hold a reference */
50372 + if (retval == NULL)
50373 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
50374 +out:
50375 + br_read_unlock(vfsmount_lock);
50376 + write_sequnlock(&rename_lock);
50377 +
50378 + BUG_ON(retval == NULL);
50379 +
50380 + return retval;
50381 +}
50382 +
50383 +static __inline__ struct acl_object_label *
50384 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50385 + const struct acl_subject_label *subj)
50386 +{
50387 + char *path = NULL;
50388 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
50389 +}
50390 +
50391 +static __inline__ struct acl_object_label *
50392 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50393 + const struct acl_subject_label *subj)
50394 +{
50395 + char *path = NULL;
50396 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
50397 +}
50398 +
50399 +static __inline__ struct acl_object_label *
50400 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50401 + const struct acl_subject_label *subj, char *path)
50402 +{
50403 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
50404 +}
50405 +
50406 +static struct acl_subject_label *
50407 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50408 + const struct acl_role_label *role)
50409 +{
50410 + struct dentry *dentry = (struct dentry *) l_dentry;
50411 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50412 + struct acl_subject_label *retval;
50413 + struct dentry *parent;
50414 +
50415 + write_seqlock(&rename_lock);
50416 + br_read_lock(vfsmount_lock);
50417 +
50418 + for (;;) {
50419 + if (dentry == real_root.dentry && mnt == real_root.mnt)
50420 + break;
50421 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50422 + if (mnt->mnt_parent == mnt)
50423 + break;
50424 +
50425 + spin_lock(&dentry->d_lock);
50426 + read_lock(&gr_inode_lock);
50427 + retval =
50428 + lookup_acl_subj_label(dentry->d_inode->i_ino,
50429 + __get_dev(dentry), role);
50430 + read_unlock(&gr_inode_lock);
50431 + spin_unlock(&dentry->d_lock);
50432 + if (retval != NULL)
50433 + goto out;
50434 +
50435 + dentry = mnt->mnt_mountpoint;
50436 + mnt = mnt->mnt_parent;
50437 + continue;
50438 + }
50439 +
50440 + spin_lock(&dentry->d_lock);
50441 + read_lock(&gr_inode_lock);
50442 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50443 + __get_dev(dentry), role);
50444 + read_unlock(&gr_inode_lock);
50445 + parent = dentry->d_parent;
50446 + spin_unlock(&dentry->d_lock);
50447 +
50448 + if (retval != NULL)
50449 + goto out;
50450 +
50451 + dentry = parent;
50452 + }
50453 +
50454 + spin_lock(&dentry->d_lock);
50455 + read_lock(&gr_inode_lock);
50456 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50457 + __get_dev(dentry), role);
50458 + read_unlock(&gr_inode_lock);
50459 + spin_unlock(&dentry->d_lock);
50460 +
50461 + if (unlikely(retval == NULL)) {
50462 + /* real_root is pinned, we don't need to hold a reference */
50463 + read_lock(&gr_inode_lock);
50464 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
50465 + __get_dev(real_root.dentry), role);
50466 + read_unlock(&gr_inode_lock);
50467 + }
50468 +out:
50469 + br_read_unlock(vfsmount_lock);
50470 + write_sequnlock(&rename_lock);
50471 +
50472 + BUG_ON(retval == NULL);
50473 +
50474 + return retval;
50475 +}
50476 +
50477 +static void
50478 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
50479 +{
50480 + struct task_struct *task = current;
50481 + const struct cred *cred = current_cred();
50482 +
50483 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50484 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50485 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50486 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
50487 +
50488 + return;
50489 +}
50490 +
50491 +static void
50492 +gr_log_learn_sysctl(const char *path, const __u32 mode)
50493 +{
50494 + struct task_struct *task = current;
50495 + const struct cred *cred = current_cred();
50496 +
50497 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50498 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50499 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50500 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
50501 +
50502 + return;
50503 +}
50504 +
50505 +static void
50506 +gr_log_learn_id_change(const char type, const unsigned int real,
50507 + const unsigned int effective, const unsigned int fs)
50508 +{
50509 + struct task_struct *task = current;
50510 + const struct cred *cred = current_cred();
50511 +
50512 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
50513 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50514 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50515 + type, real, effective, fs, &task->signal->saved_ip);
50516 +
50517 + return;
50518 +}
50519 +
50520 +__u32
50521 +gr_search_file(const struct dentry * dentry, const __u32 mode,
50522 + const struct vfsmount * mnt)
50523 +{
50524 + __u32 retval = mode;
50525 + struct acl_subject_label *curracl;
50526 + struct acl_object_label *currobj;
50527 +
50528 + if (unlikely(!(gr_status & GR_READY)))
50529 + return (mode & ~GR_AUDITS);
50530 +
50531 + curracl = current->acl;
50532 +
50533 + currobj = chk_obj_label(dentry, mnt, curracl);
50534 + retval = currobj->mode & mode;
50535 +
50536 + /* if we're opening a specified transfer file for writing
50537 + (e.g. /dev/initctl), then transfer our role to init
50538 + */
50539 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
50540 + current->role->roletype & GR_ROLE_PERSIST)) {
50541 + struct task_struct *task = init_pid_ns.child_reaper;
50542 +
50543 + if (task->role != current->role) {
50544 + task->acl_sp_role = 0;
50545 + task->acl_role_id = current->acl_role_id;
50546 + task->role = current->role;
50547 + rcu_read_lock();
50548 + read_lock(&grsec_exec_file_lock);
50549 + gr_apply_subject_to_task(task);
50550 + read_unlock(&grsec_exec_file_lock);
50551 + rcu_read_unlock();
50552 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
50553 + }
50554 + }
50555 +
50556 + if (unlikely
50557 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
50558 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
50559 + __u32 new_mode = mode;
50560 +
50561 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50562 +
50563 + retval = new_mode;
50564 +
50565 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
50566 + new_mode |= GR_INHERIT;
50567 +
50568 + if (!(mode & GR_NOLEARN))
50569 + gr_log_learn(dentry, mnt, new_mode);
50570 + }
50571 +
50572 + return retval;
50573 +}
50574 +
50575 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
50576 + const struct dentry *parent,
50577 + const struct vfsmount *mnt)
50578 +{
50579 + struct name_entry *match;
50580 + struct acl_object_label *matchpo;
50581 + struct acl_subject_label *curracl;
50582 + char *path;
50583 +
50584 + if (unlikely(!(gr_status & GR_READY)))
50585 + return NULL;
50586 +
50587 + preempt_disable();
50588 + path = gr_to_filename_rbac(new_dentry, mnt);
50589 + match = lookup_name_entry_create(path);
50590 +
50591 + curracl = current->acl;
50592 +
50593 + if (match) {
50594 + read_lock(&gr_inode_lock);
50595 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
50596 + read_unlock(&gr_inode_lock);
50597 +
50598 + if (matchpo) {
50599 + preempt_enable();
50600 + return matchpo;
50601 + }
50602 + }
50603 +
50604 + // lookup parent
50605 +
50606 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
50607 +
50608 + preempt_enable();
50609 + return matchpo;
50610 +}
50611 +
50612 +__u32
50613 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
50614 + const struct vfsmount * mnt, const __u32 mode)
50615 +{
50616 + struct acl_object_label *matchpo;
50617 + __u32 retval;
50618 +
50619 + if (unlikely(!(gr_status & GR_READY)))
50620 + return (mode & ~GR_AUDITS);
50621 +
50622 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
50623 +
50624 + retval = matchpo->mode & mode;
50625 +
50626 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
50627 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50628 + __u32 new_mode = mode;
50629 +
50630 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50631 +
50632 + gr_log_learn(new_dentry, mnt, new_mode);
50633 + return new_mode;
50634 + }
50635 +
50636 + return retval;
50637 +}
50638 +
50639 +__u32
50640 +gr_check_link(const struct dentry * new_dentry,
50641 + const struct dentry * parent_dentry,
50642 + const struct vfsmount * parent_mnt,
50643 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
50644 +{
50645 + struct acl_object_label *obj;
50646 + __u32 oldmode, newmode;
50647 + __u32 needmode;
50648 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
50649 + GR_DELETE | GR_INHERIT;
50650 +
50651 + if (unlikely(!(gr_status & GR_READY)))
50652 + return (GR_CREATE | GR_LINK);
50653 +
50654 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
50655 + oldmode = obj->mode;
50656 +
50657 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
50658 + newmode = obj->mode;
50659 +
50660 + needmode = newmode & checkmodes;
50661 +
50662 + // old name for hardlink must have at least the permissions of the new name
50663 + if ((oldmode & needmode) != needmode)
50664 + goto bad;
50665 +
50666 + // if old name had restrictions/auditing, make sure the new name does as well
50667 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
50668 +
50669 + // don't allow hardlinking of suid/sgid files without permission
50670 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50671 + needmode |= GR_SETID;
50672 +
50673 + if ((newmode & needmode) != needmode)
50674 + goto bad;
50675 +
50676 + // enforce minimum permissions
50677 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
50678 + return newmode;
50679 +bad:
50680 + needmode = oldmode;
50681 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50682 + needmode |= GR_SETID;
50683 +
50684 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
50685 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
50686 + return (GR_CREATE | GR_LINK);
50687 + } else if (newmode & GR_SUPPRESS)
50688 + return GR_SUPPRESS;
50689 + else
50690 + return 0;
50691 +}
50692 +
50693 +int
50694 +gr_check_hidden_task(const struct task_struct *task)
50695 +{
50696 + if (unlikely(!(gr_status & GR_READY)))
50697 + return 0;
50698 +
50699 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
50700 + return 1;
50701 +
50702 + return 0;
50703 +}
50704 +
50705 +int
50706 +gr_check_protected_task(const struct task_struct *task)
50707 +{
50708 + if (unlikely(!(gr_status & GR_READY) || !task))
50709 + return 0;
50710 +
50711 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50712 + task->acl != current->acl)
50713 + return 1;
50714 +
50715 + return 0;
50716 +}
50717 +
50718 +int
50719 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50720 +{
50721 + struct task_struct *p;
50722 + int ret = 0;
50723 +
50724 + if (unlikely(!(gr_status & GR_READY) || !pid))
50725 + return ret;
50726 +
50727 + read_lock(&tasklist_lock);
50728 + do_each_pid_task(pid, type, p) {
50729 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50730 + p->acl != current->acl) {
50731 + ret = 1;
50732 + goto out;
50733 + }
50734 + } while_each_pid_task(pid, type, p);
50735 +out:
50736 + read_unlock(&tasklist_lock);
50737 +
50738 + return ret;
50739 +}
50740 +
50741 +void
50742 +gr_copy_label(struct task_struct *tsk)
50743 +{
50744 + /* plain copying of fields is already done by dup_task_struct */
50745 + tsk->signal->used_accept = 0;
50746 + tsk->acl_sp_role = 0;
50747 + //tsk->acl_role_id = current->acl_role_id;
50748 + //tsk->acl = current->acl;
50749 + //tsk->role = current->role;
50750 + tsk->signal->curr_ip = current->signal->curr_ip;
50751 + tsk->signal->saved_ip = current->signal->saved_ip;
50752 + if (current->exec_file)
50753 + get_file(current->exec_file);
50754 + //tsk->exec_file = current->exec_file;
50755 + //tsk->is_writable = current->is_writable;
50756 + if (unlikely(current->signal->used_accept)) {
50757 + current->signal->curr_ip = 0;
50758 + current->signal->saved_ip = 0;
50759 + }
50760 +
50761 + return;
50762 +}
50763 +
50764 +static void
50765 +gr_set_proc_res(struct task_struct *task)
50766 +{
50767 + struct acl_subject_label *proc;
50768 + unsigned short i;
50769 +
50770 + proc = task->acl;
50771 +
50772 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
50773 + return;
50774 +
50775 + for (i = 0; i < RLIM_NLIMITS; i++) {
50776 + if (!(proc->resmask & (1 << i)))
50777 + continue;
50778 +
50779 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
50780 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
50781 + }
50782 +
50783 + return;
50784 +}
50785 +
50786 +extern int __gr_process_user_ban(struct user_struct *user);
50787 +
50788 +int
50789 +gr_check_user_change(int real, int effective, int fs)
50790 +{
50791 + unsigned int i;
50792 + __u16 num;
50793 + uid_t *uidlist;
50794 + int curuid;
50795 + int realok = 0;
50796 + int effectiveok = 0;
50797 + int fsok = 0;
50798 +
50799 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
50800 + struct user_struct *user;
50801 +
50802 + if (real == -1)
50803 + goto skipit;
50804 +
50805 + user = find_user(real);
50806 + if (user == NULL)
50807 + goto skipit;
50808 +
50809 + if (__gr_process_user_ban(user)) {
50810 + /* for find_user */
50811 + free_uid(user);
50812 + return 1;
50813 + }
50814 +
50815 + /* for find_user */
50816 + free_uid(user);
50817 +
50818 +skipit:
50819 +#endif
50820 +
50821 + if (unlikely(!(gr_status & GR_READY)))
50822 + return 0;
50823 +
50824 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50825 + gr_log_learn_id_change('u', real, effective, fs);
50826 +
50827 + num = current->acl->user_trans_num;
50828 + uidlist = current->acl->user_transitions;
50829 +
50830 + if (uidlist == NULL)
50831 + return 0;
50832 +
50833 + if (real == -1)
50834 + realok = 1;
50835 + if (effective == -1)
50836 + effectiveok = 1;
50837 + if (fs == -1)
50838 + fsok = 1;
50839 +
50840 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
50841 + for (i = 0; i < num; i++) {
50842 + curuid = (int)uidlist[i];
50843 + if (real == curuid)
50844 + realok = 1;
50845 + if (effective == curuid)
50846 + effectiveok = 1;
50847 + if (fs == curuid)
50848 + fsok = 1;
50849 + }
50850 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
50851 + for (i = 0; i < num; i++) {
50852 + curuid = (int)uidlist[i];
50853 + if (real == curuid)
50854 + break;
50855 + if (effective == curuid)
50856 + break;
50857 + if (fs == curuid)
50858 + break;
50859 + }
50860 + /* not in deny list */
50861 + if (i == num) {
50862 + realok = 1;
50863 + effectiveok = 1;
50864 + fsok = 1;
50865 + }
50866 + }
50867 +
50868 + if (realok && effectiveok && fsok)
50869 + return 0;
50870 + else {
50871 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50872 + return 1;
50873 + }
50874 +}
50875 +
50876 +int
50877 +gr_check_group_change(int real, int effective, int fs)
50878 +{
50879 + unsigned int i;
50880 + __u16 num;
50881 + gid_t *gidlist;
50882 + int curgid;
50883 + int realok = 0;
50884 + int effectiveok = 0;
50885 + int fsok = 0;
50886 +
50887 + if (unlikely(!(gr_status & GR_READY)))
50888 + return 0;
50889 +
50890 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50891 + gr_log_learn_id_change('g', real, effective, fs);
50892 +
50893 + num = current->acl->group_trans_num;
50894 + gidlist = current->acl->group_transitions;
50895 +
50896 + if (gidlist == NULL)
50897 + return 0;
50898 +
50899 + if (real == -1)
50900 + realok = 1;
50901 + if (effective == -1)
50902 + effectiveok = 1;
50903 + if (fs == -1)
50904 + fsok = 1;
50905 +
50906 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
50907 + for (i = 0; i < num; i++) {
50908 + curgid = (int)gidlist[i];
50909 + if (real == curgid)
50910 + realok = 1;
50911 + if (effective == curgid)
50912 + effectiveok = 1;
50913 + if (fs == curgid)
50914 + fsok = 1;
50915 + }
50916 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
50917 + for (i = 0; i < num; i++) {
50918 + curgid = (int)gidlist[i];
50919 + if (real == curgid)
50920 + break;
50921 + if (effective == curgid)
50922 + break;
50923 + if (fs == curgid)
50924 + break;
50925 + }
50926 + /* not in deny list */
50927 + if (i == num) {
50928 + realok = 1;
50929 + effectiveok = 1;
50930 + fsok = 1;
50931 + }
50932 + }
50933 +
50934 + if (realok && effectiveok && fsok)
50935 + return 0;
50936 + else {
50937 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50938 + return 1;
50939 + }
50940 +}
50941 +
50942 +extern int gr_acl_is_capable(const int cap);
50943 +
50944 +void
50945 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
50946 +{
50947 + struct acl_role_label *role = task->role;
50948 + struct acl_subject_label *subj = NULL;
50949 + struct acl_object_label *obj;
50950 + struct file *filp;
50951 +
50952 + if (unlikely(!(gr_status & GR_READY)))
50953 + return;
50954 +
50955 + filp = task->exec_file;
50956 +
50957 + /* kernel process, we'll give them the kernel role */
50958 + if (unlikely(!filp)) {
50959 + task->role = kernel_role;
50960 + task->acl = kernel_role->root_label;
50961 + return;
50962 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
50963 + role = lookup_acl_role_label(task, uid, gid);
50964 +
50965 + /* don't change the role if we're not a privileged process */
50966 + if (role && task->role != role &&
50967 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
50968 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
50969 + return;
50970 +
50971 + /* perform subject lookup in possibly new role
50972 + we can use this result below in the case where role == task->role
50973 + */
50974 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
50975 +
50976 + /* if we changed uid/gid, but result in the same role
50977 + and are using inheritance, don't lose the inherited subject
50978 + if current subject is other than what normal lookup
50979 + would result in, we arrived via inheritance, don't
50980 + lose subject
50981 + */
50982 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
50983 + (subj == task->acl)))
50984 + task->acl = subj;
50985 +
50986 + task->role = role;
50987 +
50988 + task->is_writable = 0;
50989 +
50990 + /* ignore additional mmap checks for processes that are writable
50991 + by the default ACL */
50992 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50993 + if (unlikely(obj->mode & GR_WRITE))
50994 + task->is_writable = 1;
50995 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50996 + if (unlikely(obj->mode & GR_WRITE))
50997 + task->is_writable = 1;
50998 +
50999 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51000 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51001 +#endif
51002 +
51003 + gr_set_proc_res(task);
51004 +
51005 + return;
51006 +}
51007 +
51008 +int
51009 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
51010 + const int unsafe_flags)
51011 +{
51012 + struct task_struct *task = current;
51013 + struct acl_subject_label *newacl;
51014 + struct acl_object_label *obj;
51015 + __u32 retmode;
51016 +
51017 + if (unlikely(!(gr_status & GR_READY)))
51018 + return 0;
51019 +
51020 + newacl = chk_subj_label(dentry, mnt, task->role);
51021 +
51022 + task_lock(task);
51023 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
51024 + !(task->role->roletype & GR_ROLE_GOD) &&
51025 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
51026 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
51027 + task_unlock(task);
51028 + if (unsafe_flags & LSM_UNSAFE_SHARE)
51029 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
51030 + else
51031 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
51032 + return -EACCES;
51033 + }
51034 + task_unlock(task);
51035 +
51036 + obj = chk_obj_label(dentry, mnt, task->acl);
51037 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
51038 +
51039 + if (!(task->acl->mode & GR_INHERITLEARN) &&
51040 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
51041 + if (obj->nested)
51042 + task->acl = obj->nested;
51043 + else
51044 + task->acl = newacl;
51045 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
51046 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
51047 +
51048 + task->is_writable = 0;
51049 +
51050 + /* ignore additional mmap checks for processes that are writable
51051 + by the default ACL */
51052 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
51053 + if (unlikely(obj->mode & GR_WRITE))
51054 + task->is_writable = 1;
51055 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
51056 + if (unlikely(obj->mode & GR_WRITE))
51057 + task->is_writable = 1;
51058 +
51059 + gr_set_proc_res(task);
51060 +
51061 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51062 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51063 +#endif
51064 + return 0;
51065 +}
51066 +
51067 +/* always called with valid inodev ptr */
51068 +static void
51069 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
51070 +{
51071 + struct acl_object_label *matchpo;
51072 + struct acl_subject_label *matchps;
51073 + struct acl_subject_label *subj;
51074 + struct acl_role_label *role;
51075 + unsigned int x;
51076 +
51077 + FOR_EACH_ROLE_START(role)
51078 + FOR_EACH_SUBJECT_START(role, subj, x)
51079 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
51080 + matchpo->mode |= GR_DELETED;
51081 + FOR_EACH_SUBJECT_END(subj,x)
51082 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
51083 + if (subj->inode == ino && subj->device == dev)
51084 + subj->mode |= GR_DELETED;
51085 + FOR_EACH_NESTED_SUBJECT_END(subj)
51086 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
51087 + matchps->mode |= GR_DELETED;
51088 + FOR_EACH_ROLE_END(role)
51089 +
51090 + inodev->nentry->deleted = 1;
51091 +
51092 + return;
51093 +}
51094 +
51095 +void
51096 +gr_handle_delete(const ino_t ino, const dev_t dev)
51097 +{
51098 + struct inodev_entry *inodev;
51099 +
51100 + if (unlikely(!(gr_status & GR_READY)))
51101 + return;
51102 +
51103 + write_lock(&gr_inode_lock);
51104 + inodev = lookup_inodev_entry(ino, dev);
51105 + if (inodev != NULL)
51106 + do_handle_delete(inodev, ino, dev);
51107 + write_unlock(&gr_inode_lock);
51108 +
51109 + return;
51110 +}
51111 +
51112 +static void
51113 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
51114 + const ino_t newinode, const dev_t newdevice,
51115 + struct acl_subject_label *subj)
51116 +{
51117 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
51118 + struct acl_object_label *match;
51119 +
51120 + match = subj->obj_hash[index];
51121 +
51122 + while (match && (match->inode != oldinode ||
51123 + match->device != olddevice ||
51124 + !(match->mode & GR_DELETED)))
51125 + match = match->next;
51126 +
51127 + if (match && (match->inode == oldinode)
51128 + && (match->device == olddevice)
51129 + && (match->mode & GR_DELETED)) {
51130 + if (match->prev == NULL) {
51131 + subj->obj_hash[index] = match->next;
51132 + if (match->next != NULL)
51133 + match->next->prev = NULL;
51134 + } else {
51135 + match->prev->next = match->next;
51136 + if (match->next != NULL)
51137 + match->next->prev = match->prev;
51138 + }
51139 + match->prev = NULL;
51140 + match->next = NULL;
51141 + match->inode = newinode;
51142 + match->device = newdevice;
51143 + match->mode &= ~GR_DELETED;
51144 +
51145 + insert_acl_obj_label(match, subj);
51146 + }
51147 +
51148 + return;
51149 +}
51150 +
51151 +static void
51152 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
51153 + const ino_t newinode, const dev_t newdevice,
51154 + struct acl_role_label *role)
51155 +{
51156 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
51157 + struct acl_subject_label *match;
51158 +
51159 + match = role->subj_hash[index];
51160 +
51161 + while (match && (match->inode != oldinode ||
51162 + match->device != olddevice ||
51163 + !(match->mode & GR_DELETED)))
51164 + match = match->next;
51165 +
51166 + if (match && (match->inode == oldinode)
51167 + && (match->device == olddevice)
51168 + && (match->mode & GR_DELETED)) {
51169 + if (match->prev == NULL) {
51170 + role->subj_hash[index] = match->next;
51171 + if (match->next != NULL)
51172 + match->next->prev = NULL;
51173 + } else {
51174 + match->prev->next = match->next;
51175 + if (match->next != NULL)
51176 + match->next->prev = match->prev;
51177 + }
51178 + match->prev = NULL;
51179 + match->next = NULL;
51180 + match->inode = newinode;
51181 + match->device = newdevice;
51182 + match->mode &= ~GR_DELETED;
51183 +
51184 + insert_acl_subj_label(match, role);
51185 + }
51186 +
51187 + return;
51188 +}
51189 +
51190 +static void
51191 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
51192 + const ino_t newinode, const dev_t newdevice)
51193 +{
51194 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
51195 + struct inodev_entry *match;
51196 +
51197 + match = inodev_set.i_hash[index];
51198 +
51199 + while (match && (match->nentry->inode != oldinode ||
51200 + match->nentry->device != olddevice || !match->nentry->deleted))
51201 + match = match->next;
51202 +
51203 + if (match && (match->nentry->inode == oldinode)
51204 + && (match->nentry->device == olddevice) &&
51205 + match->nentry->deleted) {
51206 + if (match->prev == NULL) {
51207 + inodev_set.i_hash[index] = match->next;
51208 + if (match->next != NULL)
51209 + match->next->prev = NULL;
51210 + } else {
51211 + match->prev->next = match->next;
51212 + if (match->next != NULL)
51213 + match->next->prev = match->prev;
51214 + }
51215 + match->prev = NULL;
51216 + match->next = NULL;
51217 + match->nentry->inode = newinode;
51218 + match->nentry->device = newdevice;
51219 + match->nentry->deleted = 0;
51220 +
51221 + insert_inodev_entry(match);
51222 + }
51223 +
51224 + return;
51225 +}
51226 +
51227 +static void
51228 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
51229 +{
51230 + struct acl_subject_label *subj;
51231 + struct acl_role_label *role;
51232 + unsigned int x;
51233 +
51234 + FOR_EACH_ROLE_START(role)
51235 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
51236 +
51237 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
51238 + if ((subj->inode == ino) && (subj->device == dev)) {
51239 + subj->inode = ino;
51240 + subj->device = dev;
51241 + }
51242 + FOR_EACH_NESTED_SUBJECT_END(subj)
51243 + FOR_EACH_SUBJECT_START(role, subj, x)
51244 + update_acl_obj_label(matchn->inode, matchn->device,
51245 + ino, dev, subj);
51246 + FOR_EACH_SUBJECT_END(subj,x)
51247 + FOR_EACH_ROLE_END(role)
51248 +
51249 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
51250 +
51251 + return;
51252 +}
51253 +
51254 +static void
51255 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
51256 + const struct vfsmount *mnt)
51257 +{
51258 + ino_t ino = dentry->d_inode->i_ino;
51259 + dev_t dev = __get_dev(dentry);
51260 +
51261 + __do_handle_create(matchn, ino, dev);
51262 +
51263 + return;
51264 +}
51265 +
51266 +void
51267 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
51268 +{
51269 + struct name_entry *matchn;
51270 +
51271 + if (unlikely(!(gr_status & GR_READY)))
51272 + return;
51273 +
51274 + preempt_disable();
51275 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
51276 +
51277 + if (unlikely((unsigned long)matchn)) {
51278 + write_lock(&gr_inode_lock);
51279 + do_handle_create(matchn, dentry, mnt);
51280 + write_unlock(&gr_inode_lock);
51281 + }
51282 + preempt_enable();
51283 +
51284 + return;
51285 +}
51286 +
51287 +void
51288 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
51289 +{
51290 + struct name_entry *matchn;
51291 +
51292 + if (unlikely(!(gr_status & GR_READY)))
51293 + return;
51294 +
51295 + preempt_disable();
51296 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
51297 +
51298 + if (unlikely((unsigned long)matchn)) {
51299 + write_lock(&gr_inode_lock);
51300 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
51301 + write_unlock(&gr_inode_lock);
51302 + }
51303 + preempt_enable();
51304 +
51305 + return;
51306 +}
51307 +
51308 +void
51309 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
51310 + struct dentry *old_dentry,
51311 + struct dentry *new_dentry,
51312 + struct vfsmount *mnt, const __u8 replace)
51313 +{
51314 + struct name_entry *matchn;
51315 + struct inodev_entry *inodev;
51316 + struct inode *inode = new_dentry->d_inode;
51317 + ino_t old_ino = old_dentry->d_inode->i_ino;
51318 + dev_t old_dev = __get_dev(old_dentry);
51319 +
51320 + /* vfs_rename swaps the name and parent link for old_dentry and
51321 + new_dentry
51322 + at this point, old_dentry has the new name, parent link, and inode
51323 + for the renamed file
51324 + if a file is being replaced by a rename, new_dentry has the inode
51325 + and name for the replaced file
51326 + */
51327 +
51328 + if (unlikely(!(gr_status & GR_READY)))
51329 + return;
51330 +
51331 + preempt_disable();
51332 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
51333 +
51334 + /* we wouldn't have to check d_inode if it weren't for
51335 + NFS silly-renaming
51336 + */
51337 +
51338 + write_lock(&gr_inode_lock);
51339 + if (unlikely(replace && inode)) {
51340 + ino_t new_ino = inode->i_ino;
51341 + dev_t new_dev = __get_dev(new_dentry);
51342 +
51343 + inodev = lookup_inodev_entry(new_ino, new_dev);
51344 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
51345 + do_handle_delete(inodev, new_ino, new_dev);
51346 + }
51347 +
51348 + inodev = lookup_inodev_entry(old_ino, old_dev);
51349 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
51350 + do_handle_delete(inodev, old_ino, old_dev);
51351 +
51352 + if (unlikely((unsigned long)matchn))
51353 + do_handle_create(matchn, old_dentry, mnt);
51354 +
51355 + write_unlock(&gr_inode_lock);
51356 + preempt_enable();
51357 +
51358 + return;
51359 +}
51360 +
51361 +static int
51362 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
51363 + unsigned char **sum)
51364 +{
51365 + struct acl_role_label *r;
51366 + struct role_allowed_ip *ipp;
51367 + struct role_transition *trans;
51368 + unsigned int i;
51369 + int found = 0;
51370 + u32 curr_ip = current->signal->curr_ip;
51371 +
51372 + current->signal->saved_ip = curr_ip;
51373 +
51374 + /* check transition table */
51375 +
51376 + for (trans = current->role->transitions; trans; trans = trans->next) {
51377 + if (!strcmp(rolename, trans->rolename)) {
51378 + found = 1;
51379 + break;
51380 + }
51381 + }
51382 +
51383 + if (!found)
51384 + return 0;
51385 +
51386 + /* handle special roles that do not require authentication
51387 + and check ip */
51388 +
51389 + FOR_EACH_ROLE_START(r)
51390 + if (!strcmp(rolename, r->rolename) &&
51391 + (r->roletype & GR_ROLE_SPECIAL)) {
51392 + found = 0;
51393 + if (r->allowed_ips != NULL) {
51394 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
51395 + if ((ntohl(curr_ip) & ipp->netmask) ==
51396 + (ntohl(ipp->addr) & ipp->netmask))
51397 + found = 1;
51398 + }
51399 + } else
51400 + found = 2;
51401 + if (!found)
51402 + return 0;
51403 +
51404 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
51405 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
51406 + *salt = NULL;
51407 + *sum = NULL;
51408 + return 1;
51409 + }
51410 + }
51411 + FOR_EACH_ROLE_END(r)
51412 +
51413 + for (i = 0; i < num_sprole_pws; i++) {
51414 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
51415 + *salt = acl_special_roles[i]->salt;
51416 + *sum = acl_special_roles[i]->sum;
51417 + return 1;
51418 + }
51419 + }
51420 +
51421 + return 0;
51422 +}
51423 +
51424 +static void
51425 +assign_special_role(char *rolename)
51426 +{
51427 + struct acl_object_label *obj;
51428 + struct acl_role_label *r;
51429 + struct acl_role_label *assigned = NULL;
51430 + struct task_struct *tsk;
51431 + struct file *filp;
51432 +
51433 + FOR_EACH_ROLE_START(r)
51434 + if (!strcmp(rolename, r->rolename) &&
51435 + (r->roletype & GR_ROLE_SPECIAL)) {
51436 + assigned = r;
51437 + break;
51438 + }
51439 + FOR_EACH_ROLE_END(r)
51440 +
51441 + if (!assigned)
51442 + return;
51443 +
51444 + read_lock(&tasklist_lock);
51445 + read_lock(&grsec_exec_file_lock);
51446 +
51447 + tsk = current->real_parent;
51448 + if (tsk == NULL)
51449 + goto out_unlock;
51450 +
51451 + filp = tsk->exec_file;
51452 + if (filp == NULL)
51453 + goto out_unlock;
51454 +
51455 + tsk->is_writable = 0;
51456 +
51457 + tsk->acl_sp_role = 1;
51458 + tsk->acl_role_id = ++acl_sp_role_value;
51459 + tsk->role = assigned;
51460 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
51461 +
51462 + /* ignore additional mmap checks for processes that are writable
51463 + by the default ACL */
51464 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51465 + if (unlikely(obj->mode & GR_WRITE))
51466 + tsk->is_writable = 1;
51467 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
51468 + if (unlikely(obj->mode & GR_WRITE))
51469 + tsk->is_writable = 1;
51470 +
51471 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51472 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
51473 +#endif
51474 +
51475 +out_unlock:
51476 + read_unlock(&grsec_exec_file_lock);
51477 + read_unlock(&tasklist_lock);
51478 + return;
51479 +}
51480 +
51481 +int gr_check_secure_terminal(struct task_struct *task)
51482 +{
51483 + struct task_struct *p, *p2, *p3;
51484 + struct files_struct *files;
51485 + struct fdtable *fdt;
51486 + struct file *our_file = NULL, *file;
51487 + int i;
51488 +
51489 + if (task->signal->tty == NULL)
51490 + return 1;
51491 +
51492 + files = get_files_struct(task);
51493 + if (files != NULL) {
51494 + rcu_read_lock();
51495 + fdt = files_fdtable(files);
51496 + for (i=0; i < fdt->max_fds; i++) {
51497 + file = fcheck_files(files, i);
51498 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
51499 + get_file(file);
51500 + our_file = file;
51501 + }
51502 + }
51503 + rcu_read_unlock();
51504 + put_files_struct(files);
51505 + }
51506 +
51507 + if (our_file == NULL)
51508 + return 1;
51509 +
51510 + read_lock(&tasklist_lock);
51511 + do_each_thread(p2, p) {
51512 + files = get_files_struct(p);
51513 + if (files == NULL ||
51514 + (p->signal && p->signal->tty == task->signal->tty)) {
51515 + if (files != NULL)
51516 + put_files_struct(files);
51517 + continue;
51518 + }
51519 + rcu_read_lock();
51520 + fdt = files_fdtable(files);
51521 + for (i=0; i < fdt->max_fds; i++) {
51522 + file = fcheck_files(files, i);
51523 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
51524 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
51525 + p3 = task;
51526 + while (p3->pid > 0) {
51527 + if (p3 == p)
51528 + break;
51529 + p3 = p3->real_parent;
51530 + }
51531 + if (p3 == p)
51532 + break;
51533 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
51534 + gr_handle_alertkill(p);
51535 + rcu_read_unlock();
51536 + put_files_struct(files);
51537 + read_unlock(&tasklist_lock);
51538 + fput(our_file);
51539 + return 0;
51540 + }
51541 + }
51542 + rcu_read_unlock();
51543 + put_files_struct(files);
51544 + } while_each_thread(p2, p);
51545 + read_unlock(&tasklist_lock);
51546 +
51547 + fput(our_file);
51548 + return 1;
51549 +}
51550 +
51551 +ssize_t
51552 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
51553 +{
51554 + struct gr_arg_wrapper uwrap;
51555 + unsigned char *sprole_salt = NULL;
51556 + unsigned char *sprole_sum = NULL;
51557 + int error = sizeof (struct gr_arg_wrapper);
51558 + int error2 = 0;
51559 +
51560 + mutex_lock(&gr_dev_mutex);
51561 +
51562 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
51563 + error = -EPERM;
51564 + goto out;
51565 + }
51566 +
51567 + if (count != sizeof (struct gr_arg_wrapper)) {
51568 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
51569 + error = -EINVAL;
51570 + goto out;
51571 + }
51572 +
51573 +
51574 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
51575 + gr_auth_expires = 0;
51576 + gr_auth_attempts = 0;
51577 + }
51578 +
51579 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
51580 + error = -EFAULT;
51581 + goto out;
51582 + }
51583 +
51584 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
51585 + error = -EINVAL;
51586 + goto out;
51587 + }
51588 +
51589 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
51590 + error = -EFAULT;
51591 + goto out;
51592 + }
51593 +
51594 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51595 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51596 + time_after(gr_auth_expires, get_seconds())) {
51597 + error = -EBUSY;
51598 + goto out;
51599 + }
51600 +
51601 + /* if non-root trying to do anything other than use a special role,
51602 + do not attempt authentication, do not count towards authentication
51603 + locking
51604 + */
51605 +
51606 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
51607 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51608 + current_uid()) {
51609 + error = -EPERM;
51610 + goto out;
51611 + }
51612 +
51613 + /* ensure pw and special role name are null terminated */
51614 +
51615 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
51616 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
51617 +
51618 + /* Okay.
51619 + * We have our enough of the argument structure..(we have yet
51620 + * to copy_from_user the tables themselves) . Copy the tables
51621 + * only if we need them, i.e. for loading operations. */
51622 +
51623 + switch (gr_usermode->mode) {
51624 + case GR_STATUS:
51625 + if (gr_status & GR_READY) {
51626 + error = 1;
51627 + if (!gr_check_secure_terminal(current))
51628 + error = 3;
51629 + } else
51630 + error = 2;
51631 + goto out;
51632 + case GR_SHUTDOWN:
51633 + if ((gr_status & GR_READY)
51634 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51635 + pax_open_kernel();
51636 + gr_status &= ~GR_READY;
51637 + pax_close_kernel();
51638 +
51639 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
51640 + free_variables();
51641 + memset(gr_usermode, 0, sizeof (struct gr_arg));
51642 + memset(gr_system_salt, 0, GR_SALT_LEN);
51643 + memset(gr_system_sum, 0, GR_SHA_LEN);
51644 + } else if (gr_status & GR_READY) {
51645 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
51646 + error = -EPERM;
51647 + } else {
51648 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
51649 + error = -EAGAIN;
51650 + }
51651 + break;
51652 + case GR_ENABLE:
51653 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
51654 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
51655 + else {
51656 + if (gr_status & GR_READY)
51657 + error = -EAGAIN;
51658 + else
51659 + error = error2;
51660 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
51661 + }
51662 + break;
51663 + case GR_RELOAD:
51664 + if (!(gr_status & GR_READY)) {
51665 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
51666 + error = -EAGAIN;
51667 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51668 + preempt_disable();
51669 +
51670 + pax_open_kernel();
51671 + gr_status &= ~GR_READY;
51672 + pax_close_kernel();
51673 +
51674 + free_variables();
51675 + if (!(error2 = gracl_init(gr_usermode))) {
51676 + preempt_enable();
51677 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
51678 + } else {
51679 + preempt_enable();
51680 + error = error2;
51681 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51682 + }
51683 + } else {
51684 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51685 + error = -EPERM;
51686 + }
51687 + break;
51688 + case GR_SEGVMOD:
51689 + if (unlikely(!(gr_status & GR_READY))) {
51690 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
51691 + error = -EAGAIN;
51692 + break;
51693 + }
51694 +
51695 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51696 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
51697 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
51698 + struct acl_subject_label *segvacl;
51699 + segvacl =
51700 + lookup_acl_subj_label(gr_usermode->segv_inode,
51701 + gr_usermode->segv_device,
51702 + current->role);
51703 + if (segvacl) {
51704 + segvacl->crashes = 0;
51705 + segvacl->expires = 0;
51706 + }
51707 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
51708 + gr_remove_uid(gr_usermode->segv_uid);
51709 + }
51710 + } else {
51711 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
51712 + error = -EPERM;
51713 + }
51714 + break;
51715 + case GR_SPROLE:
51716 + case GR_SPROLEPAM:
51717 + if (unlikely(!(gr_status & GR_READY))) {
51718 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
51719 + error = -EAGAIN;
51720 + break;
51721 + }
51722 +
51723 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
51724 + current->role->expires = 0;
51725 + current->role->auth_attempts = 0;
51726 + }
51727 +
51728 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51729 + time_after(current->role->expires, get_seconds())) {
51730 + error = -EBUSY;
51731 + goto out;
51732 + }
51733 +
51734 + if (lookup_special_role_auth
51735 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
51736 + && ((!sprole_salt && !sprole_sum)
51737 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
51738 + char *p = "";
51739 + assign_special_role(gr_usermode->sp_role);
51740 + read_lock(&tasklist_lock);
51741 + if (current->real_parent)
51742 + p = current->real_parent->role->rolename;
51743 + read_unlock(&tasklist_lock);
51744 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
51745 + p, acl_sp_role_value);
51746 + } else {
51747 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
51748 + error = -EPERM;
51749 + if(!(current->role->auth_attempts++))
51750 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51751 +
51752 + goto out;
51753 + }
51754 + break;
51755 + case GR_UNSPROLE:
51756 + if (unlikely(!(gr_status & GR_READY))) {
51757 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
51758 + error = -EAGAIN;
51759 + break;
51760 + }
51761 +
51762 + if (current->role->roletype & GR_ROLE_SPECIAL) {
51763 + char *p = "";
51764 + int i = 0;
51765 +
51766 + read_lock(&tasklist_lock);
51767 + if (current->real_parent) {
51768 + p = current->real_parent->role->rolename;
51769 + i = current->real_parent->acl_role_id;
51770 + }
51771 + read_unlock(&tasklist_lock);
51772 +
51773 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
51774 + gr_set_acls(1);
51775 + } else {
51776 + error = -EPERM;
51777 + goto out;
51778 + }
51779 + break;
51780 + default:
51781 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
51782 + error = -EINVAL;
51783 + break;
51784 + }
51785 +
51786 + if (error != -EPERM)
51787 + goto out;
51788 +
51789 + if(!(gr_auth_attempts++))
51790 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51791 +
51792 + out:
51793 + mutex_unlock(&gr_dev_mutex);
51794 + return error;
51795 +}
51796 +
51797 +/* must be called with
51798 + rcu_read_lock();
51799 + read_lock(&tasklist_lock);
51800 + read_lock(&grsec_exec_file_lock);
51801 +*/
51802 +int gr_apply_subject_to_task(struct task_struct *task)
51803 +{
51804 + struct acl_object_label *obj;
51805 + char *tmpname;
51806 + struct acl_subject_label *tmpsubj;
51807 + struct file *filp;
51808 + struct name_entry *nmatch;
51809 +
51810 + filp = task->exec_file;
51811 + if (filp == NULL)
51812 + return 0;
51813 +
51814 + /* the following is to apply the correct subject
51815 + on binaries running when the RBAC system
51816 + is enabled, when the binaries have been
51817 + replaced or deleted since their execution
51818 + -----
51819 + when the RBAC system starts, the inode/dev
51820 + from exec_file will be one the RBAC system
51821 + is unaware of. It only knows the inode/dev
51822 + of the present file on disk, or the absence
51823 + of it.
51824 + */
51825 + preempt_disable();
51826 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
51827 +
51828 + nmatch = lookup_name_entry(tmpname);
51829 + preempt_enable();
51830 + tmpsubj = NULL;
51831 + if (nmatch) {
51832 + if (nmatch->deleted)
51833 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
51834 + else
51835 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
51836 + if (tmpsubj != NULL)
51837 + task->acl = tmpsubj;
51838 + }
51839 + if (tmpsubj == NULL)
51840 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
51841 + task->role);
51842 + if (task->acl) {
51843 + task->is_writable = 0;
51844 + /* ignore additional mmap checks for processes that are writable
51845 + by the default ACL */
51846 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51847 + if (unlikely(obj->mode & GR_WRITE))
51848 + task->is_writable = 1;
51849 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
51850 + if (unlikely(obj->mode & GR_WRITE))
51851 + task->is_writable = 1;
51852 +
51853 + gr_set_proc_res(task);
51854 +
51855 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51856 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51857 +#endif
51858 + } else {
51859 + return 1;
51860 + }
51861 +
51862 + return 0;
51863 +}
51864 +
51865 +int
51866 +gr_set_acls(const int type)
51867 +{
51868 + struct task_struct *task, *task2;
51869 + struct acl_role_label *role = current->role;
51870 + __u16 acl_role_id = current->acl_role_id;
51871 + const struct cred *cred;
51872 + int ret;
51873 +
51874 + rcu_read_lock();
51875 + read_lock(&tasklist_lock);
51876 + read_lock(&grsec_exec_file_lock);
51877 + do_each_thread(task2, task) {
51878 + /* check to see if we're called from the exit handler,
51879 + if so, only replace ACLs that have inherited the admin
51880 + ACL */
51881 +
51882 + if (type && (task->role != role ||
51883 + task->acl_role_id != acl_role_id))
51884 + continue;
51885 +
51886 + task->acl_role_id = 0;
51887 + task->acl_sp_role = 0;
51888 +
51889 + if (task->exec_file) {
51890 + cred = __task_cred(task);
51891 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
51892 + ret = gr_apply_subject_to_task(task);
51893 + if (ret) {
51894 + read_unlock(&grsec_exec_file_lock);
51895 + read_unlock(&tasklist_lock);
51896 + rcu_read_unlock();
51897 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
51898 + return ret;
51899 + }
51900 + } else {
51901 + // it's a kernel process
51902 + task->role = kernel_role;
51903 + task->acl = kernel_role->root_label;
51904 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
51905 + task->acl->mode &= ~GR_PROCFIND;
51906 +#endif
51907 + }
51908 + } while_each_thread(task2, task);
51909 + read_unlock(&grsec_exec_file_lock);
51910 + read_unlock(&tasklist_lock);
51911 + rcu_read_unlock();
51912 +
51913 + return 0;
51914 +}
51915 +
51916 +void
51917 +gr_learn_resource(const struct task_struct *task,
51918 + const int res, const unsigned long wanted, const int gt)
51919 +{
51920 + struct acl_subject_label *acl;
51921 + const struct cred *cred;
51922 +
51923 + if (unlikely((gr_status & GR_READY) &&
51924 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
51925 + goto skip_reslog;
51926 +
51927 +#ifdef CONFIG_GRKERNSEC_RESLOG
51928 + gr_log_resource(task, res, wanted, gt);
51929 +#endif
51930 + skip_reslog:
51931 +
51932 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
51933 + return;
51934 +
51935 + acl = task->acl;
51936 +
51937 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
51938 + !(acl->resmask & (1 << (unsigned short) res))))
51939 + return;
51940 +
51941 + if (wanted >= acl->res[res].rlim_cur) {
51942 + unsigned long res_add;
51943 +
51944 + res_add = wanted;
51945 + switch (res) {
51946 + case RLIMIT_CPU:
51947 + res_add += GR_RLIM_CPU_BUMP;
51948 + break;
51949 + case RLIMIT_FSIZE:
51950 + res_add += GR_RLIM_FSIZE_BUMP;
51951 + break;
51952 + case RLIMIT_DATA:
51953 + res_add += GR_RLIM_DATA_BUMP;
51954 + break;
51955 + case RLIMIT_STACK:
51956 + res_add += GR_RLIM_STACK_BUMP;
51957 + break;
51958 + case RLIMIT_CORE:
51959 + res_add += GR_RLIM_CORE_BUMP;
51960 + break;
51961 + case RLIMIT_RSS:
51962 + res_add += GR_RLIM_RSS_BUMP;
51963 + break;
51964 + case RLIMIT_NPROC:
51965 + res_add += GR_RLIM_NPROC_BUMP;
51966 + break;
51967 + case RLIMIT_NOFILE:
51968 + res_add += GR_RLIM_NOFILE_BUMP;
51969 + break;
51970 + case RLIMIT_MEMLOCK:
51971 + res_add += GR_RLIM_MEMLOCK_BUMP;
51972 + break;
51973 + case RLIMIT_AS:
51974 + res_add += GR_RLIM_AS_BUMP;
51975 + break;
51976 + case RLIMIT_LOCKS:
51977 + res_add += GR_RLIM_LOCKS_BUMP;
51978 + break;
51979 + case RLIMIT_SIGPENDING:
51980 + res_add += GR_RLIM_SIGPENDING_BUMP;
51981 + break;
51982 + case RLIMIT_MSGQUEUE:
51983 + res_add += GR_RLIM_MSGQUEUE_BUMP;
51984 + break;
51985 + case RLIMIT_NICE:
51986 + res_add += GR_RLIM_NICE_BUMP;
51987 + break;
51988 + case RLIMIT_RTPRIO:
51989 + res_add += GR_RLIM_RTPRIO_BUMP;
51990 + break;
51991 + case RLIMIT_RTTIME:
51992 + res_add += GR_RLIM_RTTIME_BUMP;
51993 + break;
51994 + }
51995 +
51996 + acl->res[res].rlim_cur = res_add;
51997 +
51998 + if (wanted > acl->res[res].rlim_max)
51999 + acl->res[res].rlim_max = res_add;
52000 +
52001 + /* only log the subject filename, since resource logging is supported for
52002 + single-subject learning only */
52003 + rcu_read_lock();
52004 + cred = __task_cred(task);
52005 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52006 + task->role->roletype, cred->uid, cred->gid, acl->filename,
52007 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
52008 + "", (unsigned long) res, &task->signal->saved_ip);
52009 + rcu_read_unlock();
52010 + }
52011 +
52012 + return;
52013 +}
52014 +
52015 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
52016 +void
52017 +pax_set_initial_flags(struct linux_binprm *bprm)
52018 +{
52019 + struct task_struct *task = current;
52020 + struct acl_subject_label *proc;
52021 + unsigned long flags;
52022 +
52023 + if (unlikely(!(gr_status & GR_READY)))
52024 + return;
52025 +
52026 + flags = pax_get_flags(task);
52027 +
52028 + proc = task->acl;
52029 +
52030 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
52031 + flags &= ~MF_PAX_PAGEEXEC;
52032 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
52033 + flags &= ~MF_PAX_SEGMEXEC;
52034 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
52035 + flags &= ~MF_PAX_RANDMMAP;
52036 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
52037 + flags &= ~MF_PAX_EMUTRAMP;
52038 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
52039 + flags &= ~MF_PAX_MPROTECT;
52040 +
52041 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
52042 + flags |= MF_PAX_PAGEEXEC;
52043 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
52044 + flags |= MF_PAX_SEGMEXEC;
52045 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
52046 + flags |= MF_PAX_RANDMMAP;
52047 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
52048 + flags |= MF_PAX_EMUTRAMP;
52049 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
52050 + flags |= MF_PAX_MPROTECT;
52051 +
52052 + pax_set_flags(task, flags);
52053 +
52054 + return;
52055 +}
52056 +#endif
52057 +
52058 +#ifdef CONFIG_SYSCTL
52059 +/* Eric Biederman likes breaking userland ABI and every inode-based security
52060 + system to save 35kb of memory */
52061 +
52062 +/* we modify the passed in filename, but adjust it back before returning */
52063 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
52064 +{
52065 + struct name_entry *nmatch;
52066 + char *p, *lastp = NULL;
52067 + struct acl_object_label *obj = NULL, *tmp;
52068 + struct acl_subject_label *tmpsubj;
52069 + char c = '\0';
52070 +
52071 + read_lock(&gr_inode_lock);
52072 +
52073 + p = name + len - 1;
52074 + do {
52075 + nmatch = lookup_name_entry(name);
52076 + if (lastp != NULL)
52077 + *lastp = c;
52078 +
52079 + if (nmatch == NULL)
52080 + goto next_component;
52081 + tmpsubj = current->acl;
52082 + do {
52083 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
52084 + if (obj != NULL) {
52085 + tmp = obj->globbed;
52086 + while (tmp) {
52087 + if (!glob_match(tmp->filename, name)) {
52088 + obj = tmp;
52089 + goto found_obj;
52090 + }
52091 + tmp = tmp->next;
52092 + }
52093 + goto found_obj;
52094 + }
52095 + } while ((tmpsubj = tmpsubj->parent_subject));
52096 +next_component:
52097 + /* end case */
52098 + if (p == name)
52099 + break;
52100 +
52101 + while (*p != '/')
52102 + p--;
52103 + if (p == name)
52104 + lastp = p + 1;
52105 + else {
52106 + lastp = p;
52107 + p--;
52108 + }
52109 + c = *lastp;
52110 + *lastp = '\0';
52111 + } while (1);
52112 +found_obj:
52113 + read_unlock(&gr_inode_lock);
52114 + /* obj returned will always be non-null */
52115 + return obj;
52116 +}
52117 +
52118 +/* returns 0 when allowing, non-zero on error
52119 + op of 0 is used for readdir, so we don't log the names of hidden files
52120 +*/
52121 +__u32
52122 +gr_handle_sysctl(const struct ctl_table *table, const int op)
52123 +{
52124 + struct ctl_table *tmp;
52125 + const char *proc_sys = "/proc/sys";
52126 + char *path;
52127 + struct acl_object_label *obj;
52128 + unsigned short len = 0, pos = 0, depth = 0, i;
52129 + __u32 err = 0;
52130 + __u32 mode = 0;
52131 +
52132 + if (unlikely(!(gr_status & GR_READY)))
52133 + return 0;
52134 +
52135 + /* for now, ignore operations on non-sysctl entries if it's not a
52136 + readdir*/
52137 + if (table->child != NULL && op != 0)
52138 + return 0;
52139 +
52140 + mode |= GR_FIND;
52141 + /* it's only a read if it's an entry, read on dirs is for readdir */
52142 + if (op & MAY_READ)
52143 + mode |= GR_READ;
52144 + if (op & MAY_WRITE)
52145 + mode |= GR_WRITE;
52146 +
52147 + preempt_disable();
52148 +
52149 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
52150 +
52151 + /* it's only a read/write if it's an actual entry, not a dir
52152 + (which are opened for readdir)
52153 + */
52154 +
52155 + /* convert the requested sysctl entry into a pathname */
52156 +
52157 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
52158 + len += strlen(tmp->procname);
52159 + len++;
52160 + depth++;
52161 + }
52162 +
52163 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
52164 + /* deny */
52165 + goto out;
52166 + }
52167 +
52168 + memset(path, 0, PAGE_SIZE);
52169 +
52170 + memcpy(path, proc_sys, strlen(proc_sys));
52171 +
52172 + pos += strlen(proc_sys);
52173 +
52174 + for (; depth > 0; depth--) {
52175 + path[pos] = '/';
52176 + pos++;
52177 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
52178 + if (depth == i) {
52179 + memcpy(path + pos, tmp->procname,
52180 + strlen(tmp->procname));
52181 + pos += strlen(tmp->procname);
52182 + }
52183 + i++;
52184 + }
52185 + }
52186 +
52187 + obj = gr_lookup_by_name(path, pos);
52188 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
52189 +
52190 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
52191 + ((err & mode) != mode))) {
52192 + __u32 new_mode = mode;
52193 +
52194 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52195 +
52196 + err = 0;
52197 + gr_log_learn_sysctl(path, new_mode);
52198 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
52199 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
52200 + err = -ENOENT;
52201 + } else if (!(err & GR_FIND)) {
52202 + err = -ENOENT;
52203 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
52204 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
52205 + path, (mode & GR_READ) ? " reading" : "",
52206 + (mode & GR_WRITE) ? " writing" : "");
52207 + err = -EACCES;
52208 + } else if ((err & mode) != mode) {
52209 + err = -EACCES;
52210 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
52211 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
52212 + path, (mode & GR_READ) ? " reading" : "",
52213 + (mode & GR_WRITE) ? " writing" : "");
52214 + err = 0;
52215 + } else
52216 + err = 0;
52217 +
52218 + out:
52219 + preempt_enable();
52220 +
52221 + return err;
52222 +}
52223 +#endif
52224 +
52225 +int
52226 +gr_handle_proc_ptrace(struct task_struct *task)
52227 +{
52228 + struct file *filp;
52229 + struct task_struct *tmp = task;
52230 + struct task_struct *curtemp = current;
52231 + __u32 retmode;
52232 +
52233 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52234 + if (unlikely(!(gr_status & GR_READY)))
52235 + return 0;
52236 +#endif
52237 +
52238 + read_lock(&tasklist_lock);
52239 + read_lock(&grsec_exec_file_lock);
52240 + filp = task->exec_file;
52241 +
52242 + while (tmp->pid > 0) {
52243 + if (tmp == curtemp)
52244 + break;
52245 + tmp = tmp->real_parent;
52246 + }
52247 +
52248 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52249 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
52250 + read_unlock(&grsec_exec_file_lock);
52251 + read_unlock(&tasklist_lock);
52252 + return 1;
52253 + }
52254 +
52255 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52256 + if (!(gr_status & GR_READY)) {
52257 + read_unlock(&grsec_exec_file_lock);
52258 + read_unlock(&tasklist_lock);
52259 + return 0;
52260 + }
52261 +#endif
52262 +
52263 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
52264 + read_unlock(&grsec_exec_file_lock);
52265 + read_unlock(&tasklist_lock);
52266 +
52267 + if (retmode & GR_NOPTRACE)
52268 + return 1;
52269 +
52270 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
52271 + && (current->acl != task->acl || (current->acl != current->role->root_label
52272 + && current->pid != task->pid)))
52273 + return 1;
52274 +
52275 + return 0;
52276 +}
52277 +
52278 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
52279 +{
52280 + if (unlikely(!(gr_status & GR_READY)))
52281 + return;
52282 +
52283 + if (!(current->role->roletype & GR_ROLE_GOD))
52284 + return;
52285 +
52286 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
52287 + p->role->rolename, gr_task_roletype_to_char(p),
52288 + p->acl->filename);
52289 +}
52290 +
52291 +int
52292 +gr_handle_ptrace(struct task_struct *task, const long request)
52293 +{
52294 + struct task_struct *tmp = task;
52295 + struct task_struct *curtemp = current;
52296 + __u32 retmode;
52297 +
52298 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52299 + if (unlikely(!(gr_status & GR_READY)))
52300 + return 0;
52301 +#endif
52302 +
52303 + read_lock(&tasklist_lock);
52304 + while (tmp->pid > 0) {
52305 + if (tmp == curtemp)
52306 + break;
52307 + tmp = tmp->real_parent;
52308 + }
52309 +
52310 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52311 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
52312 + read_unlock(&tasklist_lock);
52313 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52314 + return 1;
52315 + }
52316 + read_unlock(&tasklist_lock);
52317 +
52318 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52319 + if (!(gr_status & GR_READY))
52320 + return 0;
52321 +#endif
52322 +
52323 + read_lock(&grsec_exec_file_lock);
52324 + if (unlikely(!task->exec_file)) {
52325 + read_unlock(&grsec_exec_file_lock);
52326 + return 0;
52327 + }
52328 +
52329 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
52330 + read_unlock(&grsec_exec_file_lock);
52331 +
52332 + if (retmode & GR_NOPTRACE) {
52333 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52334 + return 1;
52335 + }
52336 +
52337 + if (retmode & GR_PTRACERD) {
52338 + switch (request) {
52339 + case PTRACE_SEIZE:
52340 + case PTRACE_POKETEXT:
52341 + case PTRACE_POKEDATA:
52342 + case PTRACE_POKEUSR:
52343 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
52344 + case PTRACE_SETREGS:
52345 + case PTRACE_SETFPREGS:
52346 +#endif
52347 +#ifdef CONFIG_X86
52348 + case PTRACE_SETFPXREGS:
52349 +#endif
52350 +#ifdef CONFIG_ALTIVEC
52351 + case PTRACE_SETVRREGS:
52352 +#endif
52353 + return 1;
52354 + default:
52355 + return 0;
52356 + }
52357 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
52358 + !(current->role->roletype & GR_ROLE_GOD) &&
52359 + (current->acl != task->acl)) {
52360 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52361 + return 1;
52362 + }
52363 +
52364 + return 0;
52365 +}
52366 +
52367 +static int is_writable_mmap(const struct file *filp)
52368 +{
52369 + struct task_struct *task = current;
52370 + struct acl_object_label *obj, *obj2;
52371 +
52372 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
52373 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
52374 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52375 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
52376 + task->role->root_label);
52377 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
52378 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
52379 + return 1;
52380 + }
52381 + }
52382 + return 0;
52383 +}
52384 +
52385 +int
52386 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
52387 +{
52388 + __u32 mode;
52389 +
52390 + if (unlikely(!file || !(prot & PROT_EXEC)))
52391 + return 1;
52392 +
52393 + if (is_writable_mmap(file))
52394 + return 0;
52395 +
52396 + mode =
52397 + gr_search_file(file->f_path.dentry,
52398 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52399 + file->f_path.mnt);
52400 +
52401 + if (!gr_tpe_allow(file))
52402 + return 0;
52403 +
52404 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52405 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52406 + return 0;
52407 + } else if (unlikely(!(mode & GR_EXEC))) {
52408 + return 0;
52409 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52410 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52411 + return 1;
52412 + }
52413 +
52414 + return 1;
52415 +}
52416 +
52417 +int
52418 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
52419 +{
52420 + __u32 mode;
52421 +
52422 + if (unlikely(!file || !(prot & PROT_EXEC)))
52423 + return 1;
52424 +
52425 + if (is_writable_mmap(file))
52426 + return 0;
52427 +
52428 + mode =
52429 + gr_search_file(file->f_path.dentry,
52430 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52431 + file->f_path.mnt);
52432 +
52433 + if (!gr_tpe_allow(file))
52434 + return 0;
52435 +
52436 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52437 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52438 + return 0;
52439 + } else if (unlikely(!(mode & GR_EXEC))) {
52440 + return 0;
52441 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52442 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52443 + return 1;
52444 + }
52445 +
52446 + return 1;
52447 +}
52448 +
52449 +void
52450 +gr_acl_handle_psacct(struct task_struct *task, const long code)
52451 +{
52452 + unsigned long runtime;
52453 + unsigned long cputime;
52454 + unsigned int wday, cday;
52455 + __u8 whr, chr;
52456 + __u8 wmin, cmin;
52457 + __u8 wsec, csec;
52458 + struct timespec timeval;
52459 +
52460 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
52461 + !(task->acl->mode & GR_PROCACCT)))
52462 + return;
52463 +
52464 + do_posix_clock_monotonic_gettime(&timeval);
52465 + runtime = timeval.tv_sec - task->start_time.tv_sec;
52466 + wday = runtime / (3600 * 24);
52467 + runtime -= wday * (3600 * 24);
52468 + whr = runtime / 3600;
52469 + runtime -= whr * 3600;
52470 + wmin = runtime / 60;
52471 + runtime -= wmin * 60;
52472 + wsec = runtime;
52473 +
52474 + cputime = (task->utime + task->stime) / HZ;
52475 + cday = cputime / (3600 * 24);
52476 + cputime -= cday * (3600 * 24);
52477 + chr = cputime / 3600;
52478 + cputime -= chr * 3600;
52479 + cmin = cputime / 60;
52480 + cputime -= cmin * 60;
52481 + csec = cputime;
52482 +
52483 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
52484 +
52485 + return;
52486 +}
52487 +
52488 +void gr_set_kernel_label(struct task_struct *task)
52489 +{
52490 + if (gr_status & GR_READY) {
52491 + task->role = kernel_role;
52492 + task->acl = kernel_role->root_label;
52493 + }
52494 + return;
52495 +}
52496 +
52497 +#ifdef CONFIG_TASKSTATS
52498 +int gr_is_taskstats_denied(int pid)
52499 +{
52500 + struct task_struct *task;
52501 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52502 + const struct cred *cred;
52503 +#endif
52504 + int ret = 0;
52505 +
52506 + /* restrict taskstats viewing to un-chrooted root users
52507 + who have the 'view' subject flag if the RBAC system is enabled
52508 + */
52509 +
52510 + rcu_read_lock();
52511 + read_lock(&tasklist_lock);
52512 + task = find_task_by_vpid(pid);
52513 + if (task) {
52514 +#ifdef CONFIG_GRKERNSEC_CHROOT
52515 + if (proc_is_chrooted(task))
52516 + ret = -EACCES;
52517 +#endif
52518 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52519 + cred = __task_cred(task);
52520 +#ifdef CONFIG_GRKERNSEC_PROC_USER
52521 + if (cred->uid != 0)
52522 + ret = -EACCES;
52523 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52524 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
52525 + ret = -EACCES;
52526 +#endif
52527 +#endif
52528 + if (gr_status & GR_READY) {
52529 + if (!(task->acl->mode & GR_VIEW))
52530 + ret = -EACCES;
52531 + }
52532 + } else
52533 + ret = -ENOENT;
52534 +
52535 + read_unlock(&tasklist_lock);
52536 + rcu_read_unlock();
52537 +
52538 + return ret;
52539 +}
52540 +#endif
52541 +
52542 +/* AUXV entries are filled via a descendant of search_binary_handler
52543 + after we've already applied the subject for the target
52544 +*/
52545 +int gr_acl_enable_at_secure(void)
52546 +{
52547 + if (unlikely(!(gr_status & GR_READY)))
52548 + return 0;
52549 +
52550 + if (current->acl->mode & GR_ATSECURE)
52551 + return 1;
52552 +
52553 + return 0;
52554 +}
52555 +
52556 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
52557 +{
52558 + struct task_struct *task = current;
52559 + struct dentry *dentry = file->f_path.dentry;
52560 + struct vfsmount *mnt = file->f_path.mnt;
52561 + struct acl_object_label *obj, *tmp;
52562 + struct acl_subject_label *subj;
52563 + unsigned int bufsize;
52564 + int is_not_root;
52565 + char *path;
52566 + dev_t dev = __get_dev(dentry);
52567 +
52568 + if (unlikely(!(gr_status & GR_READY)))
52569 + return 1;
52570 +
52571 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52572 + return 1;
52573 +
52574 + /* ignore Eric Biederman */
52575 + if (IS_PRIVATE(dentry->d_inode))
52576 + return 1;
52577 +
52578 + subj = task->acl;
52579 + do {
52580 + obj = lookup_acl_obj_label(ino, dev, subj);
52581 + if (obj != NULL)
52582 + return (obj->mode & GR_FIND) ? 1 : 0;
52583 + } while ((subj = subj->parent_subject));
52584 +
52585 + /* this is purely an optimization since we're looking for an object
52586 + for the directory we're doing a readdir on
52587 + if it's possible for any globbed object to match the entry we're
52588 + filling into the directory, then the object we find here will be
52589 + an anchor point with attached globbed objects
52590 + */
52591 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
52592 + if (obj->globbed == NULL)
52593 + return (obj->mode & GR_FIND) ? 1 : 0;
52594 +
52595 + is_not_root = ((obj->filename[0] == '/') &&
52596 + (obj->filename[1] == '\0')) ? 0 : 1;
52597 + bufsize = PAGE_SIZE - namelen - is_not_root;
52598 +
52599 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
52600 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
52601 + return 1;
52602 +
52603 + preempt_disable();
52604 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52605 + bufsize);
52606 +
52607 + bufsize = strlen(path);
52608 +
52609 + /* if base is "/", don't append an additional slash */
52610 + if (is_not_root)
52611 + *(path + bufsize) = '/';
52612 + memcpy(path + bufsize + is_not_root, name, namelen);
52613 + *(path + bufsize + namelen + is_not_root) = '\0';
52614 +
52615 + tmp = obj->globbed;
52616 + while (tmp) {
52617 + if (!glob_match(tmp->filename, path)) {
52618 + preempt_enable();
52619 + return (tmp->mode & GR_FIND) ? 1 : 0;
52620 + }
52621 + tmp = tmp->next;
52622 + }
52623 + preempt_enable();
52624 + return (obj->mode & GR_FIND) ? 1 : 0;
52625 +}
52626 +
52627 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
52628 +EXPORT_SYMBOL(gr_acl_is_enabled);
52629 +#endif
52630 +EXPORT_SYMBOL(gr_learn_resource);
52631 +EXPORT_SYMBOL(gr_set_kernel_label);
52632 +#ifdef CONFIG_SECURITY
52633 +EXPORT_SYMBOL(gr_check_user_change);
52634 +EXPORT_SYMBOL(gr_check_group_change);
52635 +#endif
52636 +
52637 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
52638 new file mode 100644
52639 index 0000000..34fefda
52640 --- /dev/null
52641 +++ b/grsecurity/gracl_alloc.c
52642 @@ -0,0 +1,105 @@
52643 +#include <linux/kernel.h>
52644 +#include <linux/mm.h>
52645 +#include <linux/slab.h>
52646 +#include <linux/vmalloc.h>
52647 +#include <linux/gracl.h>
52648 +#include <linux/grsecurity.h>
52649 +
52650 +static unsigned long alloc_stack_next = 1;
52651 +static unsigned long alloc_stack_size = 1;
52652 +static void **alloc_stack;
52653 +
52654 +static __inline__ int
52655 +alloc_pop(void)
52656 +{
52657 + if (alloc_stack_next == 1)
52658 + return 0;
52659 +
52660 + kfree(alloc_stack[alloc_stack_next - 2]);
52661 +
52662 + alloc_stack_next--;
52663 +
52664 + return 1;
52665 +}
52666 +
52667 +static __inline__ int
52668 +alloc_push(void *buf)
52669 +{
52670 + if (alloc_stack_next >= alloc_stack_size)
52671 + return 1;
52672 +
52673 + alloc_stack[alloc_stack_next - 1] = buf;
52674 +
52675 + alloc_stack_next++;
52676 +
52677 + return 0;
52678 +}
52679 +
52680 +void *
52681 +acl_alloc(unsigned long len)
52682 +{
52683 + void *ret = NULL;
52684 +
52685 + if (!len || len > PAGE_SIZE)
52686 + goto out;
52687 +
52688 + ret = kmalloc(len, GFP_KERNEL);
52689 +
52690 + if (ret) {
52691 + if (alloc_push(ret)) {
52692 + kfree(ret);
52693 + ret = NULL;
52694 + }
52695 + }
52696 +
52697 +out:
52698 + return ret;
52699 +}
52700 +
52701 +void *
52702 +acl_alloc_num(unsigned long num, unsigned long len)
52703 +{
52704 + if (!len || (num > (PAGE_SIZE / len)))
52705 + return NULL;
52706 +
52707 + return acl_alloc(num * len);
52708 +}
52709 +
52710 +void
52711 +acl_free_all(void)
52712 +{
52713 + if (gr_acl_is_enabled() || !alloc_stack)
52714 + return;
52715 +
52716 + while (alloc_pop()) ;
52717 +
52718 + if (alloc_stack) {
52719 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
52720 + kfree(alloc_stack);
52721 + else
52722 + vfree(alloc_stack);
52723 + }
52724 +
52725 + alloc_stack = NULL;
52726 + alloc_stack_size = 1;
52727 + alloc_stack_next = 1;
52728 +
52729 + return;
52730 +}
52731 +
52732 +int
52733 +acl_alloc_stack_init(unsigned long size)
52734 +{
52735 + if ((size * sizeof (void *)) <= PAGE_SIZE)
52736 + alloc_stack =
52737 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
52738 + else
52739 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
52740 +
52741 + alloc_stack_size = size;
52742 +
52743 + if (!alloc_stack)
52744 + return 0;
52745 + else
52746 + return 1;
52747 +}
52748 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
52749 new file mode 100644
52750 index 0000000..955ddfb
52751 --- /dev/null
52752 +++ b/grsecurity/gracl_cap.c
52753 @@ -0,0 +1,101 @@
52754 +#include <linux/kernel.h>
52755 +#include <linux/module.h>
52756 +#include <linux/sched.h>
52757 +#include <linux/gracl.h>
52758 +#include <linux/grsecurity.h>
52759 +#include <linux/grinternal.h>
52760 +
52761 +extern const char *captab_log[];
52762 +extern int captab_log_entries;
52763 +
52764 +int
52765 +gr_acl_is_capable(const int cap)
52766 +{
52767 + struct task_struct *task = current;
52768 + const struct cred *cred = current_cred();
52769 + struct acl_subject_label *curracl;
52770 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52771 + kernel_cap_t cap_audit = __cap_empty_set;
52772 +
52773 + if (!gr_acl_is_enabled())
52774 + return 1;
52775 +
52776 + curracl = task->acl;
52777 +
52778 + cap_drop = curracl->cap_lower;
52779 + cap_mask = curracl->cap_mask;
52780 + cap_audit = curracl->cap_invert_audit;
52781 +
52782 + while ((curracl = curracl->parent_subject)) {
52783 + /* if the cap isn't specified in the current computed mask but is specified in the
52784 + current level subject, and is lowered in the current level subject, then add
52785 + it to the set of dropped capabilities
52786 + otherwise, add the current level subject's mask to the current computed mask
52787 + */
52788 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52789 + cap_raise(cap_mask, cap);
52790 + if (cap_raised(curracl->cap_lower, cap))
52791 + cap_raise(cap_drop, cap);
52792 + if (cap_raised(curracl->cap_invert_audit, cap))
52793 + cap_raise(cap_audit, cap);
52794 + }
52795 + }
52796 +
52797 + if (!cap_raised(cap_drop, cap)) {
52798 + if (cap_raised(cap_audit, cap))
52799 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
52800 + return 1;
52801 + }
52802 +
52803 + curracl = task->acl;
52804 +
52805 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
52806 + && cap_raised(cred->cap_effective, cap)) {
52807 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52808 + task->role->roletype, cred->uid,
52809 + cred->gid, task->exec_file ?
52810 + gr_to_filename(task->exec_file->f_path.dentry,
52811 + task->exec_file->f_path.mnt) : curracl->filename,
52812 + curracl->filename, 0UL,
52813 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
52814 + return 1;
52815 + }
52816 +
52817 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
52818 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
52819 + return 0;
52820 +}
52821 +
52822 +int
52823 +gr_acl_is_capable_nolog(const int cap)
52824 +{
52825 + struct acl_subject_label *curracl;
52826 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52827 +
52828 + if (!gr_acl_is_enabled())
52829 + return 1;
52830 +
52831 + curracl = current->acl;
52832 +
52833 + cap_drop = curracl->cap_lower;
52834 + cap_mask = curracl->cap_mask;
52835 +
52836 + while ((curracl = curracl->parent_subject)) {
52837 + /* if the cap isn't specified in the current computed mask but is specified in the
52838 + current level subject, and is lowered in the current level subject, then add
52839 + it to the set of dropped capabilities
52840 + otherwise, add the current level subject's mask to the current computed mask
52841 + */
52842 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52843 + cap_raise(cap_mask, cap);
52844 + if (cap_raised(curracl->cap_lower, cap))
52845 + cap_raise(cap_drop, cap);
52846 + }
52847 + }
52848 +
52849 + if (!cap_raised(cap_drop, cap))
52850 + return 1;
52851 +
52852 + return 0;
52853 +}
52854 +
52855 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
52856 new file mode 100644
52857 index 0000000..88d0e87
52858 --- /dev/null
52859 +++ b/grsecurity/gracl_fs.c
52860 @@ -0,0 +1,435 @@
52861 +#include <linux/kernel.h>
52862 +#include <linux/sched.h>
52863 +#include <linux/types.h>
52864 +#include <linux/fs.h>
52865 +#include <linux/file.h>
52866 +#include <linux/stat.h>
52867 +#include <linux/grsecurity.h>
52868 +#include <linux/grinternal.h>
52869 +#include <linux/gracl.h>
52870 +
52871 +umode_t
52872 +gr_acl_umask(void)
52873 +{
52874 + if (unlikely(!gr_acl_is_enabled()))
52875 + return 0;
52876 +
52877 + return current->role->umask;
52878 +}
52879 +
52880 +__u32
52881 +gr_acl_handle_hidden_file(const struct dentry * dentry,
52882 + const struct vfsmount * mnt)
52883 +{
52884 + __u32 mode;
52885 +
52886 + if (unlikely(!dentry->d_inode))
52887 + return GR_FIND;
52888 +
52889 + mode =
52890 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
52891 +
52892 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
52893 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52894 + return mode;
52895 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
52896 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52897 + return 0;
52898 + } else if (unlikely(!(mode & GR_FIND)))
52899 + return 0;
52900 +
52901 + return GR_FIND;
52902 +}
52903 +
52904 +__u32
52905 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52906 + int acc_mode)
52907 +{
52908 + __u32 reqmode = GR_FIND;
52909 + __u32 mode;
52910 +
52911 + if (unlikely(!dentry->d_inode))
52912 + return reqmode;
52913 +
52914 + if (acc_mode & MAY_APPEND)
52915 + reqmode |= GR_APPEND;
52916 + else if (acc_mode & MAY_WRITE)
52917 + reqmode |= GR_WRITE;
52918 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
52919 + reqmode |= GR_READ;
52920 +
52921 + mode =
52922 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52923 + mnt);
52924 +
52925 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52926 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52927 + reqmode & GR_READ ? " reading" : "",
52928 + reqmode & GR_WRITE ? " writing" : reqmode &
52929 + GR_APPEND ? " appending" : "");
52930 + return reqmode;
52931 + } else
52932 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52933 + {
52934 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52935 + reqmode & GR_READ ? " reading" : "",
52936 + reqmode & GR_WRITE ? " writing" : reqmode &
52937 + GR_APPEND ? " appending" : "");
52938 + return 0;
52939 + } else if (unlikely((mode & reqmode) != reqmode))
52940 + return 0;
52941 +
52942 + return reqmode;
52943 +}
52944 +
52945 +__u32
52946 +gr_acl_handle_creat(const struct dentry * dentry,
52947 + const struct dentry * p_dentry,
52948 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
52949 + const int imode)
52950 +{
52951 + __u32 reqmode = GR_WRITE | GR_CREATE;
52952 + __u32 mode;
52953 +
52954 + if (acc_mode & MAY_APPEND)
52955 + reqmode |= GR_APPEND;
52956 + // if a directory was required or the directory already exists, then
52957 + // don't count this open as a read
52958 + if ((acc_mode & MAY_READ) &&
52959 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
52960 + reqmode |= GR_READ;
52961 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
52962 + reqmode |= GR_SETID;
52963 +
52964 + mode =
52965 + gr_check_create(dentry, p_dentry, p_mnt,
52966 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52967 +
52968 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52969 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52970 + reqmode & GR_READ ? " reading" : "",
52971 + reqmode & GR_WRITE ? " writing" : reqmode &
52972 + GR_APPEND ? " appending" : "");
52973 + return reqmode;
52974 + } else
52975 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52976 + {
52977 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52978 + reqmode & GR_READ ? " reading" : "",
52979 + reqmode & GR_WRITE ? " writing" : reqmode &
52980 + GR_APPEND ? " appending" : "");
52981 + return 0;
52982 + } else if (unlikely((mode & reqmode) != reqmode))
52983 + return 0;
52984 +
52985 + return reqmode;
52986 +}
52987 +
52988 +__u32
52989 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
52990 + const int fmode)
52991 +{
52992 + __u32 mode, reqmode = GR_FIND;
52993 +
52994 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
52995 + reqmode |= GR_EXEC;
52996 + if (fmode & S_IWOTH)
52997 + reqmode |= GR_WRITE;
52998 + if (fmode & S_IROTH)
52999 + reqmode |= GR_READ;
53000 +
53001 + mode =
53002 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
53003 + mnt);
53004 +
53005 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
53006 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
53007 + reqmode & GR_READ ? " reading" : "",
53008 + reqmode & GR_WRITE ? " writing" : "",
53009 + reqmode & GR_EXEC ? " executing" : "");
53010 + return reqmode;
53011 + } else
53012 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
53013 + {
53014 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
53015 + reqmode & GR_READ ? " reading" : "",
53016 + reqmode & GR_WRITE ? " writing" : "",
53017 + reqmode & GR_EXEC ? " executing" : "");
53018 + return 0;
53019 + } else if (unlikely((mode & reqmode) != reqmode))
53020 + return 0;
53021 +
53022 + return reqmode;
53023 +}
53024 +
53025 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
53026 +{
53027 + __u32 mode;
53028 +
53029 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
53030 +
53031 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
53032 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
53033 + return mode;
53034 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
53035 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
53036 + return 0;
53037 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
53038 + return 0;
53039 +
53040 + return (reqmode);
53041 +}
53042 +
53043 +__u32
53044 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53045 +{
53046 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
53047 +}
53048 +
53049 +__u32
53050 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
53051 +{
53052 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
53053 +}
53054 +
53055 +__u32
53056 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
53057 +{
53058 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
53059 +}
53060 +
53061 +__u32
53062 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
53063 +{
53064 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
53065 +}
53066 +
53067 +__u32
53068 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
53069 + umode_t *modeptr)
53070 +{
53071 + umode_t mode;
53072 +
53073 + *modeptr &= ~gr_acl_umask();
53074 + mode = *modeptr;
53075 +
53076 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
53077 + return 1;
53078 +
53079 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
53080 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
53081 + GR_CHMOD_ACL_MSG);
53082 + } else {
53083 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
53084 + }
53085 +}
53086 +
53087 +__u32
53088 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
53089 +{
53090 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
53091 +}
53092 +
53093 +__u32
53094 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
53095 +{
53096 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
53097 +}
53098 +
53099 +__u32
53100 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
53101 +{
53102 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
53103 +}
53104 +
53105 +__u32
53106 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
53107 +{
53108 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
53109 + GR_UNIXCONNECT_ACL_MSG);
53110 +}
53111 +
53112 +/* hardlinks require at minimum create and link permission,
53113 + any additional privilege required is based on the
53114 + privilege of the file being linked to
53115 +*/
53116 +__u32
53117 +gr_acl_handle_link(const struct dentry * new_dentry,
53118 + const struct dentry * parent_dentry,
53119 + const struct vfsmount * parent_mnt,
53120 + const struct dentry * old_dentry,
53121 + const struct vfsmount * old_mnt, const char *to)
53122 +{
53123 + __u32 mode;
53124 + __u32 needmode = GR_CREATE | GR_LINK;
53125 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
53126 +
53127 + mode =
53128 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
53129 + old_mnt);
53130 +
53131 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
53132 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
53133 + return mode;
53134 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
53135 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
53136 + return 0;
53137 + } else if (unlikely((mode & needmode) != needmode))
53138 + return 0;
53139 +
53140 + return 1;
53141 +}
53142 +
53143 +__u32
53144 +gr_acl_handle_symlink(const struct dentry * new_dentry,
53145 + const struct dentry * parent_dentry,
53146 + const struct vfsmount * parent_mnt, const char *from)
53147 +{
53148 + __u32 needmode = GR_WRITE | GR_CREATE;
53149 + __u32 mode;
53150 +
53151 + mode =
53152 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
53153 + GR_CREATE | GR_AUDIT_CREATE |
53154 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
53155 +
53156 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
53157 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
53158 + return mode;
53159 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
53160 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
53161 + return 0;
53162 + } else if (unlikely((mode & needmode) != needmode))
53163 + return 0;
53164 +
53165 + return (GR_WRITE | GR_CREATE);
53166 +}
53167 +
53168 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
53169 +{
53170 + __u32 mode;
53171 +
53172 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
53173 +
53174 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
53175 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
53176 + return mode;
53177 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
53178 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
53179 + return 0;
53180 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
53181 + return 0;
53182 +
53183 + return (reqmode);
53184 +}
53185 +
53186 +__u32
53187 +gr_acl_handle_mknod(const struct dentry * new_dentry,
53188 + const struct dentry * parent_dentry,
53189 + const struct vfsmount * parent_mnt,
53190 + const int mode)
53191 +{
53192 + __u32 reqmode = GR_WRITE | GR_CREATE;
53193 + if (unlikely(mode & (S_ISUID | S_ISGID)))
53194 + reqmode |= GR_SETID;
53195 +
53196 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53197 + reqmode, GR_MKNOD_ACL_MSG);
53198 +}
53199 +
53200 +__u32
53201 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
53202 + const struct dentry *parent_dentry,
53203 + const struct vfsmount *parent_mnt)
53204 +{
53205 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53206 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
53207 +}
53208 +
53209 +#define RENAME_CHECK_SUCCESS(old, new) \
53210 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
53211 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
53212 +
53213 +int
53214 +gr_acl_handle_rename(struct dentry *new_dentry,
53215 + struct dentry *parent_dentry,
53216 + const struct vfsmount *parent_mnt,
53217 + struct dentry *old_dentry,
53218 + struct inode *old_parent_inode,
53219 + struct vfsmount *old_mnt, const char *newname)
53220 +{
53221 + __u32 comp1, comp2;
53222 + int error = 0;
53223 +
53224 + if (unlikely(!gr_acl_is_enabled()))
53225 + return 0;
53226 +
53227 + if (!new_dentry->d_inode) {
53228 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
53229 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
53230 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
53231 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
53232 + GR_DELETE | GR_AUDIT_DELETE |
53233 + GR_AUDIT_READ | GR_AUDIT_WRITE |
53234 + GR_SUPPRESS, old_mnt);
53235 + } else {
53236 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
53237 + GR_CREATE | GR_DELETE |
53238 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
53239 + GR_AUDIT_READ | GR_AUDIT_WRITE |
53240 + GR_SUPPRESS, parent_mnt);
53241 + comp2 =
53242 + gr_search_file(old_dentry,
53243 + GR_READ | GR_WRITE | GR_AUDIT_READ |
53244 + GR_DELETE | GR_AUDIT_DELETE |
53245 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
53246 + }
53247 +
53248 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
53249 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
53250 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53251 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
53252 + && !(comp2 & GR_SUPPRESS)) {
53253 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53254 + error = -EACCES;
53255 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
53256 + error = -EACCES;
53257 +
53258 + return error;
53259 +}
53260 +
53261 +void
53262 +gr_acl_handle_exit(void)
53263 +{
53264 + u16 id;
53265 + char *rolename;
53266 + struct file *exec_file;
53267 +
53268 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
53269 + !(current->role->roletype & GR_ROLE_PERSIST))) {
53270 + id = current->acl_role_id;
53271 + rolename = current->role->rolename;
53272 + gr_set_acls(1);
53273 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
53274 + }
53275 +
53276 + write_lock(&grsec_exec_file_lock);
53277 + exec_file = current->exec_file;
53278 + current->exec_file = NULL;
53279 + write_unlock(&grsec_exec_file_lock);
53280 +
53281 + if (exec_file)
53282 + fput(exec_file);
53283 +}
53284 +
53285 +int
53286 +gr_acl_handle_procpidmem(const struct task_struct *task)
53287 +{
53288 + if (unlikely(!gr_acl_is_enabled()))
53289 + return 0;
53290 +
53291 + if (task != current && task->acl->mode & GR_PROTPROCFD)
53292 + return -EACCES;
53293 +
53294 + return 0;
53295 +}
53296 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
53297 new file mode 100644
53298 index 0000000..17050ca
53299 --- /dev/null
53300 +++ b/grsecurity/gracl_ip.c
53301 @@ -0,0 +1,381 @@
53302 +#include <linux/kernel.h>
53303 +#include <asm/uaccess.h>
53304 +#include <asm/errno.h>
53305 +#include <net/sock.h>
53306 +#include <linux/file.h>
53307 +#include <linux/fs.h>
53308 +#include <linux/net.h>
53309 +#include <linux/in.h>
53310 +#include <linux/skbuff.h>
53311 +#include <linux/ip.h>
53312 +#include <linux/udp.h>
53313 +#include <linux/types.h>
53314 +#include <linux/sched.h>
53315 +#include <linux/netdevice.h>
53316 +#include <linux/inetdevice.h>
53317 +#include <linux/gracl.h>
53318 +#include <linux/grsecurity.h>
53319 +#include <linux/grinternal.h>
53320 +
53321 +#define GR_BIND 0x01
53322 +#define GR_CONNECT 0x02
53323 +#define GR_INVERT 0x04
53324 +#define GR_BINDOVERRIDE 0x08
53325 +#define GR_CONNECTOVERRIDE 0x10
53326 +#define GR_SOCK_FAMILY 0x20
53327 +
53328 +static const char * gr_protocols[IPPROTO_MAX] = {
53329 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
53330 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
53331 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
53332 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
53333 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
53334 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
53335 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
53336 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
53337 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
53338 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
53339 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
53340 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
53341 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
53342 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
53343 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
53344 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
53345 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
53346 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
53347 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
53348 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
53349 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
53350 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
53351 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
53352 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
53353 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
53354 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
53355 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
53356 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
53357 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
53358 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
53359 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
53360 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
53361 + };
53362 +
53363 +static const char * gr_socktypes[SOCK_MAX] = {
53364 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
53365 + "unknown:7", "unknown:8", "unknown:9", "packet"
53366 + };
53367 +
53368 +static const char * gr_sockfamilies[AF_MAX+1] = {
53369 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
53370 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
53371 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
53372 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
53373 + };
53374 +
53375 +const char *
53376 +gr_proto_to_name(unsigned char proto)
53377 +{
53378 + return gr_protocols[proto];
53379 +}
53380 +
53381 +const char *
53382 +gr_socktype_to_name(unsigned char type)
53383 +{
53384 + return gr_socktypes[type];
53385 +}
53386 +
53387 +const char *
53388 +gr_sockfamily_to_name(unsigned char family)
53389 +{
53390 + return gr_sockfamilies[family];
53391 +}
53392 +
53393 +int
53394 +gr_search_socket(const int domain, const int type, const int protocol)
53395 +{
53396 + struct acl_subject_label *curr;
53397 + const struct cred *cred = current_cred();
53398 +
53399 + if (unlikely(!gr_acl_is_enabled()))
53400 + goto exit;
53401 +
53402 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
53403 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
53404 + goto exit; // let the kernel handle it
53405 +
53406 + curr = current->acl;
53407 +
53408 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
53409 + /* the family is allowed, if this is PF_INET allow it only if
53410 + the extra sock type/protocol checks pass */
53411 + if (domain == PF_INET)
53412 + goto inet_check;
53413 + goto exit;
53414 + } else {
53415 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53416 + __u32 fakeip = 0;
53417 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53418 + current->role->roletype, cred->uid,
53419 + cred->gid, current->exec_file ?
53420 + gr_to_filename(current->exec_file->f_path.dentry,
53421 + current->exec_file->f_path.mnt) :
53422 + curr->filename, curr->filename,
53423 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
53424 + &current->signal->saved_ip);
53425 + goto exit;
53426 + }
53427 + goto exit_fail;
53428 + }
53429 +
53430 +inet_check:
53431 + /* the rest of this checking is for IPv4 only */
53432 + if (!curr->ips)
53433 + goto exit;
53434 +
53435 + if ((curr->ip_type & (1 << type)) &&
53436 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
53437 + goto exit;
53438 +
53439 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53440 + /* we don't place acls on raw sockets , and sometimes
53441 + dgram/ip sockets are opened for ioctl and not
53442 + bind/connect, so we'll fake a bind learn log */
53443 + if (type == SOCK_RAW || type == SOCK_PACKET) {
53444 + __u32 fakeip = 0;
53445 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53446 + current->role->roletype, cred->uid,
53447 + cred->gid, current->exec_file ?
53448 + gr_to_filename(current->exec_file->f_path.dentry,
53449 + current->exec_file->f_path.mnt) :
53450 + curr->filename, curr->filename,
53451 + &fakeip, 0, type,
53452 + protocol, GR_CONNECT, &current->signal->saved_ip);
53453 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
53454 + __u32 fakeip = 0;
53455 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53456 + current->role->roletype, cred->uid,
53457 + cred->gid, current->exec_file ?
53458 + gr_to_filename(current->exec_file->f_path.dentry,
53459 + current->exec_file->f_path.mnt) :
53460 + curr->filename, curr->filename,
53461 + &fakeip, 0, type,
53462 + protocol, GR_BIND, &current->signal->saved_ip);
53463 + }
53464 + /* we'll log when they use connect or bind */
53465 + goto exit;
53466 + }
53467 +
53468 +exit_fail:
53469 + if (domain == PF_INET)
53470 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
53471 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
53472 + else
53473 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
53474 + gr_socktype_to_name(type), protocol);
53475 +
53476 + return 0;
53477 +exit:
53478 + return 1;
53479 +}
53480 +
53481 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
53482 +{
53483 + if ((ip->mode & mode) &&
53484 + (ip_port >= ip->low) &&
53485 + (ip_port <= ip->high) &&
53486 + ((ntohl(ip_addr) & our_netmask) ==
53487 + (ntohl(our_addr) & our_netmask))
53488 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
53489 + && (ip->type & (1 << type))) {
53490 + if (ip->mode & GR_INVERT)
53491 + return 2; // specifically denied
53492 + else
53493 + return 1; // allowed
53494 + }
53495 +
53496 + return 0; // not specifically allowed, may continue parsing
53497 +}
53498 +
53499 +static int
53500 +gr_search_connectbind(const int full_mode, struct sock *sk,
53501 + struct sockaddr_in *addr, const int type)
53502 +{
53503 + char iface[IFNAMSIZ] = {0};
53504 + struct acl_subject_label *curr;
53505 + struct acl_ip_label *ip;
53506 + struct inet_sock *isk;
53507 + struct net_device *dev;
53508 + struct in_device *idev;
53509 + unsigned long i;
53510 + int ret;
53511 + int mode = full_mode & (GR_BIND | GR_CONNECT);
53512 + __u32 ip_addr = 0;
53513 + __u32 our_addr;
53514 + __u32 our_netmask;
53515 + char *p;
53516 + __u16 ip_port = 0;
53517 + const struct cred *cred = current_cred();
53518 +
53519 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
53520 + return 0;
53521 +
53522 + curr = current->acl;
53523 + isk = inet_sk(sk);
53524 +
53525 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
53526 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
53527 + addr->sin_addr.s_addr = curr->inaddr_any_override;
53528 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
53529 + struct sockaddr_in saddr;
53530 + int err;
53531 +
53532 + saddr.sin_family = AF_INET;
53533 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
53534 + saddr.sin_port = isk->inet_sport;
53535 +
53536 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53537 + if (err)
53538 + return err;
53539 +
53540 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53541 + if (err)
53542 + return err;
53543 + }
53544 +
53545 + if (!curr->ips)
53546 + return 0;
53547 +
53548 + ip_addr = addr->sin_addr.s_addr;
53549 + ip_port = ntohs(addr->sin_port);
53550 +
53551 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53552 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53553 + current->role->roletype, cred->uid,
53554 + cred->gid, current->exec_file ?
53555 + gr_to_filename(current->exec_file->f_path.dentry,
53556 + current->exec_file->f_path.mnt) :
53557 + curr->filename, curr->filename,
53558 + &ip_addr, ip_port, type,
53559 + sk->sk_protocol, mode, &current->signal->saved_ip);
53560 + return 0;
53561 + }
53562 +
53563 + for (i = 0; i < curr->ip_num; i++) {
53564 + ip = *(curr->ips + i);
53565 + if (ip->iface != NULL) {
53566 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
53567 + p = strchr(iface, ':');
53568 + if (p != NULL)
53569 + *p = '\0';
53570 + dev = dev_get_by_name(sock_net(sk), iface);
53571 + if (dev == NULL)
53572 + continue;
53573 + idev = in_dev_get(dev);
53574 + if (idev == NULL) {
53575 + dev_put(dev);
53576 + continue;
53577 + }
53578 + rcu_read_lock();
53579 + for_ifa(idev) {
53580 + if (!strcmp(ip->iface, ifa->ifa_label)) {
53581 + our_addr = ifa->ifa_address;
53582 + our_netmask = 0xffffffff;
53583 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53584 + if (ret == 1) {
53585 + rcu_read_unlock();
53586 + in_dev_put(idev);
53587 + dev_put(dev);
53588 + return 0;
53589 + } else if (ret == 2) {
53590 + rcu_read_unlock();
53591 + in_dev_put(idev);
53592 + dev_put(dev);
53593 + goto denied;
53594 + }
53595 + }
53596 + } endfor_ifa(idev);
53597 + rcu_read_unlock();
53598 + in_dev_put(idev);
53599 + dev_put(dev);
53600 + } else {
53601 + our_addr = ip->addr;
53602 + our_netmask = ip->netmask;
53603 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53604 + if (ret == 1)
53605 + return 0;
53606 + else if (ret == 2)
53607 + goto denied;
53608 + }
53609 + }
53610 +
53611 +denied:
53612 + if (mode == GR_BIND)
53613 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53614 + else if (mode == GR_CONNECT)
53615 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53616 +
53617 + return -EACCES;
53618 +}
53619 +
53620 +int
53621 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
53622 +{
53623 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
53624 +}
53625 +
53626 +int
53627 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
53628 +{
53629 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
53630 +}
53631 +
53632 +int gr_search_listen(struct socket *sock)
53633 +{
53634 + struct sock *sk = sock->sk;
53635 + struct sockaddr_in addr;
53636 +
53637 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53638 + addr.sin_port = inet_sk(sk)->inet_sport;
53639 +
53640 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53641 +}
53642 +
53643 +int gr_search_accept(struct socket *sock)
53644 +{
53645 + struct sock *sk = sock->sk;
53646 + struct sockaddr_in addr;
53647 +
53648 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53649 + addr.sin_port = inet_sk(sk)->inet_sport;
53650 +
53651 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53652 +}
53653 +
53654 +int
53655 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
53656 +{
53657 + if (addr)
53658 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
53659 + else {
53660 + struct sockaddr_in sin;
53661 + const struct inet_sock *inet = inet_sk(sk);
53662 +
53663 + sin.sin_addr.s_addr = inet->inet_daddr;
53664 + sin.sin_port = inet->inet_dport;
53665 +
53666 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53667 + }
53668 +}
53669 +
53670 +int
53671 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
53672 +{
53673 + struct sockaddr_in sin;
53674 +
53675 + if (unlikely(skb->len < sizeof (struct udphdr)))
53676 + return 0; // skip this packet
53677 +
53678 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
53679 + sin.sin_port = udp_hdr(skb)->source;
53680 +
53681 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53682 +}
53683 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
53684 new file mode 100644
53685 index 0000000..25f54ef
53686 --- /dev/null
53687 +++ b/grsecurity/gracl_learn.c
53688 @@ -0,0 +1,207 @@
53689 +#include <linux/kernel.h>
53690 +#include <linux/mm.h>
53691 +#include <linux/sched.h>
53692 +#include <linux/poll.h>
53693 +#include <linux/string.h>
53694 +#include <linux/file.h>
53695 +#include <linux/types.h>
53696 +#include <linux/vmalloc.h>
53697 +#include <linux/grinternal.h>
53698 +
53699 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
53700 + size_t count, loff_t *ppos);
53701 +extern int gr_acl_is_enabled(void);
53702 +
53703 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
53704 +static int gr_learn_attached;
53705 +
53706 +/* use a 512k buffer */
53707 +#define LEARN_BUFFER_SIZE (512 * 1024)
53708 +
53709 +static DEFINE_SPINLOCK(gr_learn_lock);
53710 +static DEFINE_MUTEX(gr_learn_user_mutex);
53711 +
53712 +/* we need to maintain two buffers, so that the kernel context of grlearn
53713 + uses a semaphore around the userspace copying, and the other kernel contexts
53714 + use a spinlock when copying into the buffer, since they cannot sleep
53715 +*/
53716 +static char *learn_buffer;
53717 +static char *learn_buffer_user;
53718 +static int learn_buffer_len;
53719 +static int learn_buffer_user_len;
53720 +
53721 +static ssize_t
53722 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
53723 +{
53724 + DECLARE_WAITQUEUE(wait, current);
53725 + ssize_t retval = 0;
53726 +
53727 + add_wait_queue(&learn_wait, &wait);
53728 + set_current_state(TASK_INTERRUPTIBLE);
53729 + do {
53730 + mutex_lock(&gr_learn_user_mutex);
53731 + spin_lock(&gr_learn_lock);
53732 + if (learn_buffer_len)
53733 + break;
53734 + spin_unlock(&gr_learn_lock);
53735 + mutex_unlock(&gr_learn_user_mutex);
53736 + if (file->f_flags & O_NONBLOCK) {
53737 + retval = -EAGAIN;
53738 + goto out;
53739 + }
53740 + if (signal_pending(current)) {
53741 + retval = -ERESTARTSYS;
53742 + goto out;
53743 + }
53744 +
53745 + schedule();
53746 + } while (1);
53747 +
53748 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
53749 + learn_buffer_user_len = learn_buffer_len;
53750 + retval = learn_buffer_len;
53751 + learn_buffer_len = 0;
53752 +
53753 + spin_unlock(&gr_learn_lock);
53754 +
53755 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
53756 + retval = -EFAULT;
53757 +
53758 + mutex_unlock(&gr_learn_user_mutex);
53759 +out:
53760 + set_current_state(TASK_RUNNING);
53761 + remove_wait_queue(&learn_wait, &wait);
53762 + return retval;
53763 +}
53764 +
53765 +static unsigned int
53766 +poll_learn(struct file * file, poll_table * wait)
53767 +{
53768 + poll_wait(file, &learn_wait, wait);
53769 +
53770 + if (learn_buffer_len)
53771 + return (POLLIN | POLLRDNORM);
53772 +
53773 + return 0;
53774 +}
53775 +
53776 +void
53777 +gr_clear_learn_entries(void)
53778 +{
53779 + char *tmp;
53780 +
53781 + mutex_lock(&gr_learn_user_mutex);
53782 + spin_lock(&gr_learn_lock);
53783 + tmp = learn_buffer;
53784 + learn_buffer = NULL;
53785 + spin_unlock(&gr_learn_lock);
53786 + if (tmp)
53787 + vfree(tmp);
53788 + if (learn_buffer_user != NULL) {
53789 + vfree(learn_buffer_user);
53790 + learn_buffer_user = NULL;
53791 + }
53792 + learn_buffer_len = 0;
53793 + mutex_unlock(&gr_learn_user_mutex);
53794 +
53795 + return;
53796 +}
53797 +
53798 +void
53799 +gr_add_learn_entry(const char *fmt, ...)
53800 +{
53801 + va_list args;
53802 + unsigned int len;
53803 +
53804 + if (!gr_learn_attached)
53805 + return;
53806 +
53807 + spin_lock(&gr_learn_lock);
53808 +
53809 + /* leave a gap at the end so we know when it's "full" but don't have to
53810 + compute the exact length of the string we're trying to append
53811 + */
53812 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
53813 + spin_unlock(&gr_learn_lock);
53814 + wake_up_interruptible(&learn_wait);
53815 + return;
53816 + }
53817 + if (learn_buffer == NULL) {
53818 + spin_unlock(&gr_learn_lock);
53819 + return;
53820 + }
53821 +
53822 + va_start(args, fmt);
53823 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
53824 + va_end(args);
53825 +
53826 + learn_buffer_len += len + 1;
53827 +
53828 + spin_unlock(&gr_learn_lock);
53829 + wake_up_interruptible(&learn_wait);
53830 +
53831 + return;
53832 +}
53833 +
53834 +static int
53835 +open_learn(struct inode *inode, struct file *file)
53836 +{
53837 + if (file->f_mode & FMODE_READ && gr_learn_attached)
53838 + return -EBUSY;
53839 + if (file->f_mode & FMODE_READ) {
53840 + int retval = 0;
53841 + mutex_lock(&gr_learn_user_mutex);
53842 + if (learn_buffer == NULL)
53843 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
53844 + if (learn_buffer_user == NULL)
53845 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
53846 + if (learn_buffer == NULL) {
53847 + retval = -ENOMEM;
53848 + goto out_error;
53849 + }
53850 + if (learn_buffer_user == NULL) {
53851 + retval = -ENOMEM;
53852 + goto out_error;
53853 + }
53854 + learn_buffer_len = 0;
53855 + learn_buffer_user_len = 0;
53856 + gr_learn_attached = 1;
53857 +out_error:
53858 + mutex_unlock(&gr_learn_user_mutex);
53859 + return retval;
53860 + }
53861 + return 0;
53862 +}
53863 +
53864 +static int
53865 +close_learn(struct inode *inode, struct file *file)
53866 +{
53867 + if (file->f_mode & FMODE_READ) {
53868 + char *tmp = NULL;
53869 + mutex_lock(&gr_learn_user_mutex);
53870 + spin_lock(&gr_learn_lock);
53871 + tmp = learn_buffer;
53872 + learn_buffer = NULL;
53873 + spin_unlock(&gr_learn_lock);
53874 + if (tmp)
53875 + vfree(tmp);
53876 + if (learn_buffer_user != NULL) {
53877 + vfree(learn_buffer_user);
53878 + learn_buffer_user = NULL;
53879 + }
53880 + learn_buffer_len = 0;
53881 + learn_buffer_user_len = 0;
53882 + gr_learn_attached = 0;
53883 + mutex_unlock(&gr_learn_user_mutex);
53884 + }
53885 +
53886 + return 0;
53887 +}
53888 +
53889 +const struct file_operations grsec_fops = {
53890 + .read = read_learn,
53891 + .write = write_grsec_handler,
53892 + .open = open_learn,
53893 + .release = close_learn,
53894 + .poll = poll_learn,
53895 +};
53896 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
53897 new file mode 100644
53898 index 0000000..39645c9
53899 --- /dev/null
53900 +++ b/grsecurity/gracl_res.c
53901 @@ -0,0 +1,68 @@
53902 +#include <linux/kernel.h>
53903 +#include <linux/sched.h>
53904 +#include <linux/gracl.h>
53905 +#include <linux/grinternal.h>
53906 +
53907 +static const char *restab_log[] = {
53908 + [RLIMIT_CPU] = "RLIMIT_CPU",
53909 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
53910 + [RLIMIT_DATA] = "RLIMIT_DATA",
53911 + [RLIMIT_STACK] = "RLIMIT_STACK",
53912 + [RLIMIT_CORE] = "RLIMIT_CORE",
53913 + [RLIMIT_RSS] = "RLIMIT_RSS",
53914 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
53915 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
53916 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
53917 + [RLIMIT_AS] = "RLIMIT_AS",
53918 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
53919 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
53920 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
53921 + [RLIMIT_NICE] = "RLIMIT_NICE",
53922 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
53923 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
53924 + [GR_CRASH_RES] = "RLIMIT_CRASH"
53925 +};
53926 +
53927 +void
53928 +gr_log_resource(const struct task_struct *task,
53929 + const int res, const unsigned long wanted, const int gt)
53930 +{
53931 + const struct cred *cred;
53932 + unsigned long rlim;
53933 +
53934 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
53935 + return;
53936 +
53937 + // not yet supported resource
53938 + if (unlikely(!restab_log[res]))
53939 + return;
53940 +
53941 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
53942 + rlim = task_rlimit_max(task, res);
53943 + else
53944 + rlim = task_rlimit(task, res);
53945 +
53946 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
53947 + return;
53948 +
53949 + rcu_read_lock();
53950 + cred = __task_cred(task);
53951 +
53952 + if (res == RLIMIT_NPROC &&
53953 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
53954 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
53955 + goto out_rcu_unlock;
53956 + else if (res == RLIMIT_MEMLOCK &&
53957 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
53958 + goto out_rcu_unlock;
53959 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
53960 + goto out_rcu_unlock;
53961 + rcu_read_unlock();
53962 +
53963 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
53964 +
53965 + return;
53966 +out_rcu_unlock:
53967 + rcu_read_unlock();
53968 + return;
53969 +}
53970 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
53971 new file mode 100644
53972 index 0000000..5556be3
53973 --- /dev/null
53974 +++ b/grsecurity/gracl_segv.c
53975 @@ -0,0 +1,299 @@
53976 +#include <linux/kernel.h>
53977 +#include <linux/mm.h>
53978 +#include <asm/uaccess.h>
53979 +#include <asm/errno.h>
53980 +#include <asm/mman.h>
53981 +#include <net/sock.h>
53982 +#include <linux/file.h>
53983 +#include <linux/fs.h>
53984 +#include <linux/net.h>
53985 +#include <linux/in.h>
53986 +#include <linux/slab.h>
53987 +#include <linux/types.h>
53988 +#include <linux/sched.h>
53989 +#include <linux/timer.h>
53990 +#include <linux/gracl.h>
53991 +#include <linux/grsecurity.h>
53992 +#include <linux/grinternal.h>
53993 +
53994 +static struct crash_uid *uid_set;
53995 +static unsigned short uid_used;
53996 +static DEFINE_SPINLOCK(gr_uid_lock);
53997 +extern rwlock_t gr_inode_lock;
53998 +extern struct acl_subject_label *
53999 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
54000 + struct acl_role_label *role);
54001 +
54002 +#ifdef CONFIG_BTRFS_FS
54003 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
54004 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
54005 +#endif
54006 +
54007 +static inline dev_t __get_dev(const struct dentry *dentry)
54008 +{
54009 +#ifdef CONFIG_BTRFS_FS
54010 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
54011 + return get_btrfs_dev_from_inode(dentry->d_inode);
54012 + else
54013 +#endif
54014 + return dentry->d_inode->i_sb->s_dev;
54015 +}
54016 +
54017 +int
54018 +gr_init_uidset(void)
54019 +{
54020 + uid_set =
54021 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
54022 + uid_used = 0;
54023 +
54024 + return uid_set ? 1 : 0;
54025 +}
54026 +
54027 +void
54028 +gr_free_uidset(void)
54029 +{
54030 + if (uid_set)
54031 + kfree(uid_set);
54032 +
54033 + return;
54034 +}
54035 +
54036 +int
54037 +gr_find_uid(const uid_t uid)
54038 +{
54039 + struct crash_uid *tmp = uid_set;
54040 + uid_t buid;
54041 + int low = 0, high = uid_used - 1, mid;
54042 +
54043 + while (high >= low) {
54044 + mid = (low + high) >> 1;
54045 + buid = tmp[mid].uid;
54046 + if (buid == uid)
54047 + return mid;
54048 + if (buid > uid)
54049 + high = mid - 1;
54050 + if (buid < uid)
54051 + low = mid + 1;
54052 + }
54053 +
54054 + return -1;
54055 +}
54056 +
54057 +static __inline__ void
54058 +gr_insertsort(void)
54059 +{
54060 + unsigned short i, j;
54061 + struct crash_uid index;
54062 +
54063 + for (i = 1; i < uid_used; i++) {
54064 + index = uid_set[i];
54065 + j = i;
54066 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
54067 + uid_set[j] = uid_set[j - 1];
54068 + j--;
54069 + }
54070 + uid_set[j] = index;
54071 + }
54072 +
54073 + return;
54074 +}
54075 +
54076 +static __inline__ void
54077 +gr_insert_uid(const uid_t uid, const unsigned long expires)
54078 +{
54079 + int loc;
54080 +
54081 + if (uid_used == GR_UIDTABLE_MAX)
54082 + return;
54083 +
54084 + loc = gr_find_uid(uid);
54085 +
54086 + if (loc >= 0) {
54087 + uid_set[loc].expires = expires;
54088 + return;
54089 + }
54090 +
54091 + uid_set[uid_used].uid = uid;
54092 + uid_set[uid_used].expires = expires;
54093 + uid_used++;
54094 +
54095 + gr_insertsort();
54096 +
54097 + return;
54098 +}
54099 +
54100 +void
54101 +gr_remove_uid(const unsigned short loc)
54102 +{
54103 + unsigned short i;
54104 +
54105 + for (i = loc + 1; i < uid_used; i++)
54106 + uid_set[i - 1] = uid_set[i];
54107 +
54108 + uid_used--;
54109 +
54110 + return;
54111 +}
54112 +
54113 +int
54114 +gr_check_crash_uid(const uid_t uid)
54115 +{
54116 + int loc;
54117 + int ret = 0;
54118 +
54119 + if (unlikely(!gr_acl_is_enabled()))
54120 + return 0;
54121 +
54122 + spin_lock(&gr_uid_lock);
54123 + loc = gr_find_uid(uid);
54124 +
54125 + if (loc < 0)
54126 + goto out_unlock;
54127 +
54128 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
54129 + gr_remove_uid(loc);
54130 + else
54131 + ret = 1;
54132 +
54133 +out_unlock:
54134 + spin_unlock(&gr_uid_lock);
54135 + return ret;
54136 +}
54137 +
54138 +static __inline__ int
54139 +proc_is_setxid(const struct cred *cred)
54140 +{
54141 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
54142 + cred->uid != cred->fsuid)
54143 + return 1;
54144 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
54145 + cred->gid != cred->fsgid)
54146 + return 1;
54147 +
54148 + return 0;
54149 +}
54150 +
54151 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
54152 +
54153 +void
54154 +gr_handle_crash(struct task_struct *task, const int sig)
54155 +{
54156 + struct acl_subject_label *curr;
54157 + struct task_struct *tsk, *tsk2;
54158 + const struct cred *cred;
54159 + const struct cred *cred2;
54160 +
54161 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
54162 + return;
54163 +
54164 + if (unlikely(!gr_acl_is_enabled()))
54165 + return;
54166 +
54167 + curr = task->acl;
54168 +
54169 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
54170 + return;
54171 +
54172 + if (time_before_eq(curr->expires, get_seconds())) {
54173 + curr->expires = 0;
54174 + curr->crashes = 0;
54175 + }
54176 +
54177 + curr->crashes++;
54178 +
54179 + if (!curr->expires)
54180 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
54181 +
54182 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
54183 + time_after(curr->expires, get_seconds())) {
54184 + rcu_read_lock();
54185 + cred = __task_cred(task);
54186 + if (cred->uid && proc_is_setxid(cred)) {
54187 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
54188 + spin_lock(&gr_uid_lock);
54189 + gr_insert_uid(cred->uid, curr->expires);
54190 + spin_unlock(&gr_uid_lock);
54191 + curr->expires = 0;
54192 + curr->crashes = 0;
54193 + read_lock(&tasklist_lock);
54194 + do_each_thread(tsk2, tsk) {
54195 + cred2 = __task_cred(tsk);
54196 + if (tsk != task && cred2->uid == cred->uid)
54197 + gr_fake_force_sig(SIGKILL, tsk);
54198 + } while_each_thread(tsk2, tsk);
54199 + read_unlock(&tasklist_lock);
54200 + } else {
54201 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
54202 + read_lock(&tasklist_lock);
54203 + read_lock(&grsec_exec_file_lock);
54204 + do_each_thread(tsk2, tsk) {
54205 + if (likely(tsk != task)) {
54206 + // if this thread has the same subject as the one that triggered
54207 + // RES_CRASH and it's the same binary, kill it
54208 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
54209 + gr_fake_force_sig(SIGKILL, tsk);
54210 + }
54211 + } while_each_thread(tsk2, tsk);
54212 + read_unlock(&grsec_exec_file_lock);
54213 + read_unlock(&tasklist_lock);
54214 + }
54215 + rcu_read_unlock();
54216 + }
54217 +
54218 + return;
54219 +}
54220 +
54221 +int
54222 +gr_check_crash_exec(const struct file *filp)
54223 +{
54224 + struct acl_subject_label *curr;
54225 +
54226 + if (unlikely(!gr_acl_is_enabled()))
54227 + return 0;
54228 +
54229 + read_lock(&gr_inode_lock);
54230 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
54231 + __get_dev(filp->f_path.dentry),
54232 + current->role);
54233 + read_unlock(&gr_inode_lock);
54234 +
54235 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
54236 + (!curr->crashes && !curr->expires))
54237 + return 0;
54238 +
54239 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
54240 + time_after(curr->expires, get_seconds()))
54241 + return 1;
54242 + else if (time_before_eq(curr->expires, get_seconds())) {
54243 + curr->crashes = 0;
54244 + curr->expires = 0;
54245 + }
54246 +
54247 + return 0;
54248 +}
54249 +
54250 +void
54251 +gr_handle_alertkill(struct task_struct *task)
54252 +{
54253 + struct acl_subject_label *curracl;
54254 + __u32 curr_ip;
54255 + struct task_struct *p, *p2;
54256 +
54257 + if (unlikely(!gr_acl_is_enabled()))
54258 + return;
54259 +
54260 + curracl = task->acl;
54261 + curr_ip = task->signal->curr_ip;
54262 +
54263 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
54264 + read_lock(&tasklist_lock);
54265 + do_each_thread(p2, p) {
54266 + if (p->signal->curr_ip == curr_ip)
54267 + gr_fake_force_sig(SIGKILL, p);
54268 + } while_each_thread(p2, p);
54269 + read_unlock(&tasklist_lock);
54270 + } else if (curracl->mode & GR_KILLPROC)
54271 + gr_fake_force_sig(SIGKILL, task);
54272 +
54273 + return;
54274 +}
54275 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
54276 new file mode 100644
54277 index 0000000..9d83a69
54278 --- /dev/null
54279 +++ b/grsecurity/gracl_shm.c
54280 @@ -0,0 +1,40 @@
54281 +#include <linux/kernel.h>
54282 +#include <linux/mm.h>
54283 +#include <linux/sched.h>
54284 +#include <linux/file.h>
54285 +#include <linux/ipc.h>
54286 +#include <linux/gracl.h>
54287 +#include <linux/grsecurity.h>
54288 +#include <linux/grinternal.h>
54289 +
54290 +int
54291 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54292 + const time_t shm_createtime, const uid_t cuid, const int shmid)
54293 +{
54294 + struct task_struct *task;
54295 +
54296 + if (!gr_acl_is_enabled())
54297 + return 1;
54298 +
54299 + rcu_read_lock();
54300 + read_lock(&tasklist_lock);
54301 +
54302 + task = find_task_by_vpid(shm_cprid);
54303 +
54304 + if (unlikely(!task))
54305 + task = find_task_by_vpid(shm_lapid);
54306 +
54307 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
54308 + (task->pid == shm_lapid)) &&
54309 + (task->acl->mode & GR_PROTSHM) &&
54310 + (task->acl != current->acl))) {
54311 + read_unlock(&tasklist_lock);
54312 + rcu_read_unlock();
54313 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
54314 + return 0;
54315 + }
54316 + read_unlock(&tasklist_lock);
54317 + rcu_read_unlock();
54318 +
54319 + return 1;
54320 +}
54321 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
54322 new file mode 100644
54323 index 0000000..bc0be01
54324 --- /dev/null
54325 +++ b/grsecurity/grsec_chdir.c
54326 @@ -0,0 +1,19 @@
54327 +#include <linux/kernel.h>
54328 +#include <linux/sched.h>
54329 +#include <linux/fs.h>
54330 +#include <linux/file.h>
54331 +#include <linux/grsecurity.h>
54332 +#include <linux/grinternal.h>
54333 +
54334 +void
54335 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
54336 +{
54337 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54338 + if ((grsec_enable_chdir && grsec_enable_group &&
54339 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
54340 + !grsec_enable_group)) {
54341 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
54342 + }
54343 +#endif
54344 + return;
54345 +}
54346 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
54347 new file mode 100644
54348 index 0000000..a2dc675
54349 --- /dev/null
54350 +++ b/grsecurity/grsec_chroot.c
54351 @@ -0,0 +1,351 @@
54352 +#include <linux/kernel.h>
54353 +#include <linux/module.h>
54354 +#include <linux/sched.h>
54355 +#include <linux/file.h>
54356 +#include <linux/fs.h>
54357 +#include <linux/mount.h>
54358 +#include <linux/types.h>
54359 +#include <linux/pid_namespace.h>
54360 +#include <linux/grsecurity.h>
54361 +#include <linux/grinternal.h>
54362 +
54363 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
54364 +{
54365 +#ifdef CONFIG_GRKERNSEC
54366 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
54367 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
54368 + task->gr_is_chrooted = 1;
54369 + else
54370 + task->gr_is_chrooted = 0;
54371 +
54372 + task->gr_chroot_dentry = path->dentry;
54373 +#endif
54374 + return;
54375 +}
54376 +
54377 +void gr_clear_chroot_entries(struct task_struct *task)
54378 +{
54379 +#ifdef CONFIG_GRKERNSEC
54380 + task->gr_is_chrooted = 0;
54381 + task->gr_chroot_dentry = NULL;
54382 +#endif
54383 + return;
54384 +}
54385 +
54386 +int
54387 +gr_handle_chroot_unix(const pid_t pid)
54388 +{
54389 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54390 + struct task_struct *p;
54391 +
54392 + if (unlikely(!grsec_enable_chroot_unix))
54393 + return 1;
54394 +
54395 + if (likely(!proc_is_chrooted(current)))
54396 + return 1;
54397 +
54398 + rcu_read_lock();
54399 + read_lock(&tasklist_lock);
54400 + p = find_task_by_vpid_unrestricted(pid);
54401 + if (unlikely(p && !have_same_root(current, p))) {
54402 + read_unlock(&tasklist_lock);
54403 + rcu_read_unlock();
54404 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
54405 + return 0;
54406 + }
54407 + read_unlock(&tasklist_lock);
54408 + rcu_read_unlock();
54409 +#endif
54410 + return 1;
54411 +}
54412 +
54413 +int
54414 +gr_handle_chroot_nice(void)
54415 +{
54416 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54417 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
54418 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
54419 + return -EPERM;
54420 + }
54421 +#endif
54422 + return 0;
54423 +}
54424 +
54425 +int
54426 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
54427 +{
54428 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54429 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
54430 + && proc_is_chrooted(current)) {
54431 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
54432 + return -EACCES;
54433 + }
54434 +#endif
54435 + return 0;
54436 +}
54437 +
54438 +int
54439 +gr_handle_chroot_rawio(const struct inode *inode)
54440 +{
54441 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54442 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
54443 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
54444 + return 1;
54445 +#endif
54446 + return 0;
54447 +}
54448 +
54449 +int
54450 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
54451 +{
54452 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54453 + struct task_struct *p;
54454 + int ret = 0;
54455 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
54456 + return ret;
54457 +
54458 + read_lock(&tasklist_lock);
54459 + do_each_pid_task(pid, type, p) {
54460 + if (!have_same_root(current, p)) {
54461 + ret = 1;
54462 + goto out;
54463 + }
54464 + } while_each_pid_task(pid, type, p);
54465 +out:
54466 + read_unlock(&tasklist_lock);
54467 + return ret;
54468 +#endif
54469 + return 0;
54470 +}
54471 +
54472 +int
54473 +gr_pid_is_chrooted(struct task_struct *p)
54474 +{
54475 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54476 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
54477 + return 0;
54478 +
54479 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
54480 + !have_same_root(current, p)) {
54481 + return 1;
54482 + }
54483 +#endif
54484 + return 0;
54485 +}
54486 +
54487 +EXPORT_SYMBOL(gr_pid_is_chrooted);
54488 +
54489 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
54490 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
54491 +{
54492 + struct path path, currentroot;
54493 + int ret = 0;
54494 +
54495 + path.dentry = (struct dentry *)u_dentry;
54496 + path.mnt = (struct vfsmount *)u_mnt;
54497 + get_fs_root(current->fs, &currentroot);
54498 + if (path_is_under(&path, &currentroot))
54499 + ret = 1;
54500 + path_put(&currentroot);
54501 +
54502 + return ret;
54503 +}
54504 +#endif
54505 +
54506 +int
54507 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
54508 +{
54509 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54510 + if (!grsec_enable_chroot_fchdir)
54511 + return 1;
54512 +
54513 + if (!proc_is_chrooted(current))
54514 + return 1;
54515 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
54516 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
54517 + return 0;
54518 + }
54519 +#endif
54520 + return 1;
54521 +}
54522 +
54523 +int
54524 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54525 + const time_t shm_createtime)
54526 +{
54527 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54528 + struct task_struct *p;
54529 + time_t starttime;
54530 +
54531 + if (unlikely(!grsec_enable_chroot_shmat))
54532 + return 1;
54533 +
54534 + if (likely(!proc_is_chrooted(current)))
54535 + return 1;
54536 +
54537 + rcu_read_lock();
54538 + read_lock(&tasklist_lock);
54539 +
54540 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
54541 + starttime = p->start_time.tv_sec;
54542 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
54543 + if (have_same_root(current, p)) {
54544 + goto allow;
54545 + } else {
54546 + read_unlock(&tasklist_lock);
54547 + rcu_read_unlock();
54548 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54549 + return 0;
54550 + }
54551 + }
54552 + /* creator exited, pid reuse, fall through to next check */
54553 + }
54554 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
54555 + if (unlikely(!have_same_root(current, p))) {
54556 + read_unlock(&tasklist_lock);
54557 + rcu_read_unlock();
54558 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54559 + return 0;
54560 + }
54561 + }
54562 +
54563 +allow:
54564 + read_unlock(&tasklist_lock);
54565 + rcu_read_unlock();
54566 +#endif
54567 + return 1;
54568 +}
54569 +
54570 +void
54571 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
54572 +{
54573 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54574 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
54575 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
54576 +#endif
54577 + return;
54578 +}
54579 +
54580 +int
54581 +gr_handle_chroot_mknod(const struct dentry *dentry,
54582 + const struct vfsmount *mnt, const int mode)
54583 +{
54584 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54585 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
54586 + proc_is_chrooted(current)) {
54587 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
54588 + return -EPERM;
54589 + }
54590 +#endif
54591 + return 0;
54592 +}
54593 +
54594 +int
54595 +gr_handle_chroot_mount(const struct dentry *dentry,
54596 + const struct vfsmount *mnt, const char *dev_name)
54597 +{
54598 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54599 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
54600 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
54601 + return -EPERM;
54602 + }
54603 +#endif
54604 + return 0;
54605 +}
54606 +
54607 +int
54608 +gr_handle_chroot_pivot(void)
54609 +{
54610 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54611 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
54612 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
54613 + return -EPERM;
54614 + }
54615 +#endif
54616 + return 0;
54617 +}
54618 +
54619 +int
54620 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
54621 +{
54622 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54623 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
54624 + !gr_is_outside_chroot(dentry, mnt)) {
54625 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
54626 + return -EPERM;
54627 + }
54628 +#endif
54629 + return 0;
54630 +}
54631 +
54632 +extern const char *captab_log[];
54633 +extern int captab_log_entries;
54634 +
54635 +int
54636 +gr_chroot_is_capable(const int cap)
54637 +{
54638 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54639 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54640 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54641 + if (cap_raised(chroot_caps, cap)) {
54642 + const struct cred *creds = current_cred();
54643 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
54644 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
54645 + }
54646 + return 0;
54647 + }
54648 + }
54649 +#endif
54650 + return 1;
54651 +}
54652 +
54653 +int
54654 +gr_chroot_is_capable_nolog(const int cap)
54655 +{
54656 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54657 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54658 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54659 + if (cap_raised(chroot_caps, cap)) {
54660 + return 0;
54661 + }
54662 + }
54663 +#endif
54664 + return 1;
54665 +}
54666 +
54667 +int
54668 +gr_handle_chroot_sysctl(const int op)
54669 +{
54670 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54671 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
54672 + proc_is_chrooted(current))
54673 + return -EACCES;
54674 +#endif
54675 + return 0;
54676 +}
54677 +
54678 +void
54679 +gr_handle_chroot_chdir(struct path *path)
54680 +{
54681 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54682 + if (grsec_enable_chroot_chdir)
54683 + set_fs_pwd(current->fs, path);
54684 +#endif
54685 + return;
54686 +}
54687 +
54688 +int
54689 +gr_handle_chroot_chmod(const struct dentry *dentry,
54690 + const struct vfsmount *mnt, const int mode)
54691 +{
54692 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54693 + /* allow chmod +s on directories, but not files */
54694 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
54695 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
54696 + proc_is_chrooted(current)) {
54697 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
54698 + return -EPERM;
54699 + }
54700 +#endif
54701 + return 0;
54702 +}
54703 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
54704 new file mode 100644
54705 index 0000000..213ad8b
54706 --- /dev/null
54707 +++ b/grsecurity/grsec_disabled.c
54708 @@ -0,0 +1,437 @@
54709 +#include <linux/kernel.h>
54710 +#include <linux/module.h>
54711 +#include <linux/sched.h>
54712 +#include <linux/file.h>
54713 +#include <linux/fs.h>
54714 +#include <linux/kdev_t.h>
54715 +#include <linux/net.h>
54716 +#include <linux/in.h>
54717 +#include <linux/ip.h>
54718 +#include <linux/skbuff.h>
54719 +#include <linux/sysctl.h>
54720 +
54721 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54722 +void
54723 +pax_set_initial_flags(struct linux_binprm *bprm)
54724 +{
54725 + return;
54726 +}
54727 +#endif
54728 +
54729 +#ifdef CONFIG_SYSCTL
54730 +__u32
54731 +gr_handle_sysctl(const struct ctl_table * table, const int op)
54732 +{
54733 + return 0;
54734 +}
54735 +#endif
54736 +
54737 +#ifdef CONFIG_TASKSTATS
54738 +int gr_is_taskstats_denied(int pid)
54739 +{
54740 + return 0;
54741 +}
54742 +#endif
54743 +
54744 +int
54745 +gr_acl_is_enabled(void)
54746 +{
54747 + return 0;
54748 +}
54749 +
54750 +void
54751 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54752 +{
54753 + return;
54754 +}
54755 +
54756 +int
54757 +gr_handle_rawio(const struct inode *inode)
54758 +{
54759 + return 0;
54760 +}
54761 +
54762 +void
54763 +gr_acl_handle_psacct(struct task_struct *task, const long code)
54764 +{
54765 + return;
54766 +}
54767 +
54768 +int
54769 +gr_handle_ptrace(struct task_struct *task, const long request)
54770 +{
54771 + return 0;
54772 +}
54773 +
54774 +int
54775 +gr_handle_proc_ptrace(struct task_struct *task)
54776 +{
54777 + return 0;
54778 +}
54779 +
54780 +void
54781 +gr_learn_resource(const struct task_struct *task,
54782 + const int res, const unsigned long wanted, const int gt)
54783 +{
54784 + return;
54785 +}
54786 +
54787 +int
54788 +gr_set_acls(const int type)
54789 +{
54790 + return 0;
54791 +}
54792 +
54793 +int
54794 +gr_check_hidden_task(const struct task_struct *tsk)
54795 +{
54796 + return 0;
54797 +}
54798 +
54799 +int
54800 +gr_check_protected_task(const struct task_struct *task)
54801 +{
54802 + return 0;
54803 +}
54804 +
54805 +int
54806 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
54807 +{
54808 + return 0;
54809 +}
54810 +
54811 +void
54812 +gr_copy_label(struct task_struct *tsk)
54813 +{
54814 + return;
54815 +}
54816 +
54817 +void
54818 +gr_set_pax_flags(struct task_struct *task)
54819 +{
54820 + return;
54821 +}
54822 +
54823 +int
54824 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54825 + const int unsafe_share)
54826 +{
54827 + return 0;
54828 +}
54829 +
54830 +void
54831 +gr_handle_delete(const ino_t ino, const dev_t dev)
54832 +{
54833 + return;
54834 +}
54835 +
54836 +void
54837 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54838 +{
54839 + return;
54840 +}
54841 +
54842 +void
54843 +gr_handle_crash(struct task_struct *task, const int sig)
54844 +{
54845 + return;
54846 +}
54847 +
54848 +int
54849 +gr_check_crash_exec(const struct file *filp)
54850 +{
54851 + return 0;
54852 +}
54853 +
54854 +int
54855 +gr_check_crash_uid(const uid_t uid)
54856 +{
54857 + return 0;
54858 +}
54859 +
54860 +void
54861 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54862 + struct dentry *old_dentry,
54863 + struct dentry *new_dentry,
54864 + struct vfsmount *mnt, const __u8 replace)
54865 +{
54866 + return;
54867 +}
54868 +
54869 +int
54870 +gr_search_socket(const int family, const int type, const int protocol)
54871 +{
54872 + return 1;
54873 +}
54874 +
54875 +int
54876 +gr_search_connectbind(const int mode, const struct socket *sock,
54877 + const struct sockaddr_in *addr)
54878 +{
54879 + return 0;
54880 +}
54881 +
54882 +void
54883 +gr_handle_alertkill(struct task_struct *task)
54884 +{
54885 + return;
54886 +}
54887 +
54888 +__u32
54889 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
54890 +{
54891 + return 1;
54892 +}
54893 +
54894 +__u32
54895 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54896 + const struct vfsmount * mnt)
54897 +{
54898 + return 1;
54899 +}
54900 +
54901 +__u32
54902 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54903 + int acc_mode)
54904 +{
54905 + return 1;
54906 +}
54907 +
54908 +__u32
54909 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54910 +{
54911 + return 1;
54912 +}
54913 +
54914 +__u32
54915 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
54916 +{
54917 + return 1;
54918 +}
54919 +
54920 +int
54921 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
54922 + unsigned int *vm_flags)
54923 +{
54924 + return 1;
54925 +}
54926 +
54927 +__u32
54928 +gr_acl_handle_truncate(const struct dentry * dentry,
54929 + const struct vfsmount * mnt)
54930 +{
54931 + return 1;
54932 +}
54933 +
54934 +__u32
54935 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
54936 +{
54937 + return 1;
54938 +}
54939 +
54940 +__u32
54941 +gr_acl_handle_access(const struct dentry * dentry,
54942 + const struct vfsmount * mnt, const int fmode)
54943 +{
54944 + return 1;
54945 +}
54946 +
54947 +__u32
54948 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
54949 + umode_t *mode)
54950 +{
54951 + return 1;
54952 +}
54953 +
54954 +__u32
54955 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
54956 +{
54957 + return 1;
54958 +}
54959 +
54960 +__u32
54961 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
54962 +{
54963 + return 1;
54964 +}
54965 +
54966 +void
54967 +grsecurity_init(void)
54968 +{
54969 + return;
54970 +}
54971 +
54972 +umode_t gr_acl_umask(void)
54973 +{
54974 + return 0;
54975 +}
54976 +
54977 +__u32
54978 +gr_acl_handle_mknod(const struct dentry * new_dentry,
54979 + const struct dentry * parent_dentry,
54980 + const struct vfsmount * parent_mnt,
54981 + const int mode)
54982 +{
54983 + return 1;
54984 +}
54985 +
54986 +__u32
54987 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
54988 + const struct dentry * parent_dentry,
54989 + const struct vfsmount * parent_mnt)
54990 +{
54991 + return 1;
54992 +}
54993 +
54994 +__u32
54995 +gr_acl_handle_symlink(const struct dentry * new_dentry,
54996 + const struct dentry * parent_dentry,
54997 + const struct vfsmount * parent_mnt, const char *from)
54998 +{
54999 + return 1;
55000 +}
55001 +
55002 +__u32
55003 +gr_acl_handle_link(const struct dentry * new_dentry,
55004 + const struct dentry * parent_dentry,
55005 + const struct vfsmount * parent_mnt,
55006 + const struct dentry * old_dentry,
55007 + const struct vfsmount * old_mnt, const char *to)
55008 +{
55009 + return 1;
55010 +}
55011 +
55012 +int
55013 +gr_acl_handle_rename(const struct dentry *new_dentry,
55014 + const struct dentry *parent_dentry,
55015 + const struct vfsmount *parent_mnt,
55016 + const struct dentry *old_dentry,
55017 + const struct inode *old_parent_inode,
55018 + const struct vfsmount *old_mnt, const char *newname)
55019 +{
55020 + return 0;
55021 +}
55022 +
55023 +int
55024 +gr_acl_handle_filldir(const struct file *file, const char *name,
55025 + const int namelen, const ino_t ino)
55026 +{
55027 + return 1;
55028 +}
55029 +
55030 +int
55031 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55032 + const time_t shm_createtime, const uid_t cuid, const int shmid)
55033 +{
55034 + return 1;
55035 +}
55036 +
55037 +int
55038 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
55039 +{
55040 + return 0;
55041 +}
55042 +
55043 +int
55044 +gr_search_accept(const struct socket *sock)
55045 +{
55046 + return 0;
55047 +}
55048 +
55049 +int
55050 +gr_search_listen(const struct socket *sock)
55051 +{
55052 + return 0;
55053 +}
55054 +
55055 +int
55056 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
55057 +{
55058 + return 0;
55059 +}
55060 +
55061 +__u32
55062 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
55063 +{
55064 + return 1;
55065 +}
55066 +
55067 +__u32
55068 +gr_acl_handle_creat(const struct dentry * dentry,
55069 + const struct dentry * p_dentry,
55070 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55071 + const int imode)
55072 +{
55073 + return 1;
55074 +}
55075 +
55076 +void
55077 +gr_acl_handle_exit(void)
55078 +{
55079 + return;
55080 +}
55081 +
55082 +int
55083 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
55084 +{
55085 + return 1;
55086 +}
55087 +
55088 +void
55089 +gr_set_role_label(const uid_t uid, const gid_t gid)
55090 +{
55091 + return;
55092 +}
55093 +
55094 +int
55095 +gr_acl_handle_procpidmem(const struct task_struct *task)
55096 +{
55097 + return 0;
55098 +}
55099 +
55100 +int
55101 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
55102 +{
55103 + return 0;
55104 +}
55105 +
55106 +int
55107 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
55108 +{
55109 + return 0;
55110 +}
55111 +
55112 +void
55113 +gr_set_kernel_label(struct task_struct *task)
55114 +{
55115 + return;
55116 +}
55117 +
55118 +int
55119 +gr_check_user_change(int real, int effective, int fs)
55120 +{
55121 + return 0;
55122 +}
55123 +
55124 +int
55125 +gr_check_group_change(int real, int effective, int fs)
55126 +{
55127 + return 0;
55128 +}
55129 +
55130 +int gr_acl_enable_at_secure(void)
55131 +{
55132 + return 0;
55133 +}
55134 +
55135 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
55136 +{
55137 + return dentry->d_inode->i_sb->s_dev;
55138 +}
55139 +
55140 +EXPORT_SYMBOL(gr_learn_resource);
55141 +EXPORT_SYMBOL(gr_set_kernel_label);
55142 +#ifdef CONFIG_SECURITY
55143 +EXPORT_SYMBOL(gr_check_user_change);
55144 +EXPORT_SYMBOL(gr_check_group_change);
55145 +#endif
55146 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
55147 new file mode 100644
55148 index 0000000..2b05ada
55149 --- /dev/null
55150 +++ b/grsecurity/grsec_exec.c
55151 @@ -0,0 +1,146 @@
55152 +#include <linux/kernel.h>
55153 +#include <linux/sched.h>
55154 +#include <linux/file.h>
55155 +#include <linux/binfmts.h>
55156 +#include <linux/fs.h>
55157 +#include <linux/types.h>
55158 +#include <linux/grdefs.h>
55159 +#include <linux/grsecurity.h>
55160 +#include <linux/grinternal.h>
55161 +#include <linux/capability.h>
55162 +#include <linux/module.h>
55163 +
55164 +#include <asm/uaccess.h>
55165 +
55166 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55167 +static char gr_exec_arg_buf[132];
55168 +static DEFINE_MUTEX(gr_exec_arg_mutex);
55169 +#endif
55170 +
55171 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
55172 +
55173 +void
55174 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
55175 +{
55176 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55177 + char *grarg = gr_exec_arg_buf;
55178 + unsigned int i, x, execlen = 0;
55179 + char c;
55180 +
55181 + if (!((grsec_enable_execlog && grsec_enable_group &&
55182 + in_group_p(grsec_audit_gid))
55183 + || (grsec_enable_execlog && !grsec_enable_group)))
55184 + return;
55185 +
55186 + mutex_lock(&gr_exec_arg_mutex);
55187 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
55188 +
55189 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
55190 + const char __user *p;
55191 + unsigned int len;
55192 +
55193 + p = get_user_arg_ptr(argv, i);
55194 + if (IS_ERR(p))
55195 + goto log;
55196 +
55197 + len = strnlen_user(p, 128 - execlen);
55198 + if (len > 128 - execlen)
55199 + len = 128 - execlen;
55200 + else if (len > 0)
55201 + len--;
55202 + if (copy_from_user(grarg + execlen, p, len))
55203 + goto log;
55204 +
55205 + /* rewrite unprintable characters */
55206 + for (x = 0; x < len; x++) {
55207 + c = *(grarg + execlen + x);
55208 + if (c < 32 || c > 126)
55209 + *(grarg + execlen + x) = ' ';
55210 + }
55211 +
55212 + execlen += len;
55213 + *(grarg + execlen) = ' ';
55214 + *(grarg + execlen + 1) = '\0';
55215 + execlen++;
55216 + }
55217 +
55218 + log:
55219 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
55220 + bprm->file->f_path.mnt, grarg);
55221 + mutex_unlock(&gr_exec_arg_mutex);
55222 +#endif
55223 + return;
55224 +}
55225 +
55226 +#ifdef CONFIG_GRKERNSEC
55227 +extern int gr_acl_is_capable(const int cap);
55228 +extern int gr_acl_is_capable_nolog(const int cap);
55229 +extern int gr_chroot_is_capable(const int cap);
55230 +extern int gr_chroot_is_capable_nolog(const int cap);
55231 +#endif
55232 +
55233 +const char *captab_log[] = {
55234 + "CAP_CHOWN",
55235 + "CAP_DAC_OVERRIDE",
55236 + "CAP_DAC_READ_SEARCH",
55237 + "CAP_FOWNER",
55238 + "CAP_FSETID",
55239 + "CAP_KILL",
55240 + "CAP_SETGID",
55241 + "CAP_SETUID",
55242 + "CAP_SETPCAP",
55243 + "CAP_LINUX_IMMUTABLE",
55244 + "CAP_NET_BIND_SERVICE",
55245 + "CAP_NET_BROADCAST",
55246 + "CAP_NET_ADMIN",
55247 + "CAP_NET_RAW",
55248 + "CAP_IPC_LOCK",
55249 + "CAP_IPC_OWNER",
55250 + "CAP_SYS_MODULE",
55251 + "CAP_SYS_RAWIO",
55252 + "CAP_SYS_CHROOT",
55253 + "CAP_SYS_PTRACE",
55254 + "CAP_SYS_PACCT",
55255 + "CAP_SYS_ADMIN",
55256 + "CAP_SYS_BOOT",
55257 + "CAP_SYS_NICE",
55258 + "CAP_SYS_RESOURCE",
55259 + "CAP_SYS_TIME",
55260 + "CAP_SYS_TTY_CONFIG",
55261 + "CAP_MKNOD",
55262 + "CAP_LEASE",
55263 + "CAP_AUDIT_WRITE",
55264 + "CAP_AUDIT_CONTROL",
55265 + "CAP_SETFCAP",
55266 + "CAP_MAC_OVERRIDE",
55267 + "CAP_MAC_ADMIN",
55268 + "CAP_SYSLOG",
55269 + "CAP_WAKE_ALARM"
55270 +};
55271 +
55272 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
55273 +
55274 +int gr_is_capable(const int cap)
55275 +{
55276 +#ifdef CONFIG_GRKERNSEC
55277 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
55278 + return 1;
55279 + return 0;
55280 +#else
55281 + return 1;
55282 +#endif
55283 +}
55284 +
55285 +int gr_is_capable_nolog(const int cap)
55286 +{
55287 +#ifdef CONFIG_GRKERNSEC
55288 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
55289 + return 1;
55290 + return 0;
55291 +#else
55292 + return 1;
55293 +#endif
55294 +}
55295 +
55296 +EXPORT_SYMBOL(gr_is_capable);
55297 +EXPORT_SYMBOL(gr_is_capable_nolog);
55298 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
55299 new file mode 100644
55300 index 0000000..d3ee748
55301 --- /dev/null
55302 +++ b/grsecurity/grsec_fifo.c
55303 @@ -0,0 +1,24 @@
55304 +#include <linux/kernel.h>
55305 +#include <linux/sched.h>
55306 +#include <linux/fs.h>
55307 +#include <linux/file.h>
55308 +#include <linux/grinternal.h>
55309 +
55310 +int
55311 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
55312 + const struct dentry *dir, const int flag, const int acc_mode)
55313 +{
55314 +#ifdef CONFIG_GRKERNSEC_FIFO
55315 + const struct cred *cred = current_cred();
55316 +
55317 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
55318 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
55319 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
55320 + (cred->fsuid != dentry->d_inode->i_uid)) {
55321 + if (!inode_permission(dentry->d_inode, acc_mode))
55322 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
55323 + return -EACCES;
55324 + }
55325 +#endif
55326 + return 0;
55327 +}
55328 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
55329 new file mode 100644
55330 index 0000000..8ca18bf
55331 --- /dev/null
55332 +++ b/grsecurity/grsec_fork.c
55333 @@ -0,0 +1,23 @@
55334 +#include <linux/kernel.h>
55335 +#include <linux/sched.h>
55336 +#include <linux/grsecurity.h>
55337 +#include <linux/grinternal.h>
55338 +#include <linux/errno.h>
55339 +
55340 +void
55341 +gr_log_forkfail(const int retval)
55342 +{
55343 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55344 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
55345 + switch (retval) {
55346 + case -EAGAIN:
55347 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
55348 + break;
55349 + case -ENOMEM:
55350 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
55351 + break;
55352 + }
55353 + }
55354 +#endif
55355 + return;
55356 +}
55357 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
55358 new file mode 100644
55359 index 0000000..01ddde4
55360 --- /dev/null
55361 +++ b/grsecurity/grsec_init.c
55362 @@ -0,0 +1,277 @@
55363 +#include <linux/kernel.h>
55364 +#include <linux/sched.h>
55365 +#include <linux/mm.h>
55366 +#include <linux/gracl.h>
55367 +#include <linux/slab.h>
55368 +#include <linux/vmalloc.h>
55369 +#include <linux/percpu.h>
55370 +#include <linux/module.h>
55371 +
55372 +int grsec_enable_ptrace_readexec;
55373 +int grsec_enable_setxid;
55374 +int grsec_enable_brute;
55375 +int grsec_enable_link;
55376 +int grsec_enable_dmesg;
55377 +int grsec_enable_harden_ptrace;
55378 +int grsec_enable_fifo;
55379 +int grsec_enable_execlog;
55380 +int grsec_enable_signal;
55381 +int grsec_enable_forkfail;
55382 +int grsec_enable_audit_ptrace;
55383 +int grsec_enable_time;
55384 +int grsec_enable_audit_textrel;
55385 +int grsec_enable_group;
55386 +int grsec_audit_gid;
55387 +int grsec_enable_chdir;
55388 +int grsec_enable_mount;
55389 +int grsec_enable_rofs;
55390 +int grsec_enable_chroot_findtask;
55391 +int grsec_enable_chroot_mount;
55392 +int grsec_enable_chroot_shmat;
55393 +int grsec_enable_chroot_fchdir;
55394 +int grsec_enable_chroot_double;
55395 +int grsec_enable_chroot_pivot;
55396 +int grsec_enable_chroot_chdir;
55397 +int grsec_enable_chroot_chmod;
55398 +int grsec_enable_chroot_mknod;
55399 +int grsec_enable_chroot_nice;
55400 +int grsec_enable_chroot_execlog;
55401 +int grsec_enable_chroot_caps;
55402 +int grsec_enable_chroot_sysctl;
55403 +int grsec_enable_chroot_unix;
55404 +int grsec_enable_tpe;
55405 +int grsec_tpe_gid;
55406 +int grsec_enable_blackhole;
55407 +#ifdef CONFIG_IPV6_MODULE
55408 +EXPORT_SYMBOL(grsec_enable_blackhole);
55409 +#endif
55410 +int grsec_lastack_retries;
55411 +int grsec_enable_tpe_all;
55412 +int grsec_enable_tpe_invert;
55413 +int grsec_enable_socket_all;
55414 +int grsec_socket_all_gid;
55415 +int grsec_enable_socket_client;
55416 +int grsec_socket_client_gid;
55417 +int grsec_enable_socket_server;
55418 +int grsec_socket_server_gid;
55419 +int grsec_resource_logging;
55420 +int grsec_disable_privio;
55421 +int grsec_enable_log_rwxmaps;
55422 +int grsec_lock;
55423 +
55424 +DEFINE_SPINLOCK(grsec_alert_lock);
55425 +unsigned long grsec_alert_wtime = 0;
55426 +unsigned long grsec_alert_fyet = 0;
55427 +
55428 +DEFINE_SPINLOCK(grsec_audit_lock);
55429 +
55430 +DEFINE_RWLOCK(grsec_exec_file_lock);
55431 +
55432 +char *gr_shared_page[4];
55433 +
55434 +char *gr_alert_log_fmt;
55435 +char *gr_audit_log_fmt;
55436 +char *gr_alert_log_buf;
55437 +char *gr_audit_log_buf;
55438 +
55439 +extern struct gr_arg *gr_usermode;
55440 +extern unsigned char *gr_system_salt;
55441 +extern unsigned char *gr_system_sum;
55442 +
55443 +void __init
55444 +grsecurity_init(void)
55445 +{
55446 + int j;
55447 + /* create the per-cpu shared pages */
55448 +
55449 +#ifdef CONFIG_X86
55450 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
55451 +#endif
55452 +
55453 + for (j = 0; j < 4; j++) {
55454 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
55455 + if (gr_shared_page[j] == NULL) {
55456 + panic("Unable to allocate grsecurity shared page");
55457 + return;
55458 + }
55459 + }
55460 +
55461 + /* allocate log buffers */
55462 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
55463 + if (!gr_alert_log_fmt) {
55464 + panic("Unable to allocate grsecurity alert log format buffer");
55465 + return;
55466 + }
55467 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
55468 + if (!gr_audit_log_fmt) {
55469 + panic("Unable to allocate grsecurity audit log format buffer");
55470 + return;
55471 + }
55472 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55473 + if (!gr_alert_log_buf) {
55474 + panic("Unable to allocate grsecurity alert log buffer");
55475 + return;
55476 + }
55477 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55478 + if (!gr_audit_log_buf) {
55479 + panic("Unable to allocate grsecurity audit log buffer");
55480 + return;
55481 + }
55482 +
55483 + /* allocate memory for authentication structure */
55484 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
55485 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
55486 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
55487 +
55488 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
55489 + panic("Unable to allocate grsecurity authentication structure");
55490 + return;
55491 + }
55492 +
55493 +
55494 +#ifdef CONFIG_GRKERNSEC_IO
55495 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
55496 + grsec_disable_privio = 1;
55497 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55498 + grsec_disable_privio = 1;
55499 +#else
55500 + grsec_disable_privio = 0;
55501 +#endif
55502 +#endif
55503 +
55504 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55505 + /* for backward compatibility, tpe_invert always defaults to on if
55506 + enabled in the kernel
55507 + */
55508 + grsec_enable_tpe_invert = 1;
55509 +#endif
55510 +
55511 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55512 +#ifndef CONFIG_GRKERNSEC_SYSCTL
55513 + grsec_lock = 1;
55514 +#endif
55515 +
55516 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55517 + grsec_enable_audit_textrel = 1;
55518 +#endif
55519 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55520 + grsec_enable_log_rwxmaps = 1;
55521 +#endif
55522 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55523 + grsec_enable_group = 1;
55524 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
55525 +#endif
55526 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55527 + grsec_enable_ptrace_readexec = 1;
55528 +#endif
55529 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55530 + grsec_enable_chdir = 1;
55531 +#endif
55532 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55533 + grsec_enable_harden_ptrace = 1;
55534 +#endif
55535 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55536 + grsec_enable_mount = 1;
55537 +#endif
55538 +#ifdef CONFIG_GRKERNSEC_LINK
55539 + grsec_enable_link = 1;
55540 +#endif
55541 +#ifdef CONFIG_GRKERNSEC_BRUTE
55542 + grsec_enable_brute = 1;
55543 +#endif
55544 +#ifdef CONFIG_GRKERNSEC_DMESG
55545 + grsec_enable_dmesg = 1;
55546 +#endif
55547 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55548 + grsec_enable_blackhole = 1;
55549 + grsec_lastack_retries = 4;
55550 +#endif
55551 +#ifdef CONFIG_GRKERNSEC_FIFO
55552 + grsec_enable_fifo = 1;
55553 +#endif
55554 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55555 + grsec_enable_execlog = 1;
55556 +#endif
55557 +#ifdef CONFIG_GRKERNSEC_SETXID
55558 + grsec_enable_setxid = 1;
55559 +#endif
55560 +#ifdef CONFIG_GRKERNSEC_SIGNAL
55561 + grsec_enable_signal = 1;
55562 +#endif
55563 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55564 + grsec_enable_forkfail = 1;
55565 +#endif
55566 +#ifdef CONFIG_GRKERNSEC_TIME
55567 + grsec_enable_time = 1;
55568 +#endif
55569 +#ifdef CONFIG_GRKERNSEC_RESLOG
55570 + grsec_resource_logging = 1;
55571 +#endif
55572 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55573 + grsec_enable_chroot_findtask = 1;
55574 +#endif
55575 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55576 + grsec_enable_chroot_unix = 1;
55577 +#endif
55578 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55579 + grsec_enable_chroot_mount = 1;
55580 +#endif
55581 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55582 + grsec_enable_chroot_fchdir = 1;
55583 +#endif
55584 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55585 + grsec_enable_chroot_shmat = 1;
55586 +#endif
55587 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55588 + grsec_enable_audit_ptrace = 1;
55589 +#endif
55590 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55591 + grsec_enable_chroot_double = 1;
55592 +#endif
55593 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55594 + grsec_enable_chroot_pivot = 1;
55595 +#endif
55596 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55597 + grsec_enable_chroot_chdir = 1;
55598 +#endif
55599 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55600 + grsec_enable_chroot_chmod = 1;
55601 +#endif
55602 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55603 + grsec_enable_chroot_mknod = 1;
55604 +#endif
55605 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55606 + grsec_enable_chroot_nice = 1;
55607 +#endif
55608 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55609 + grsec_enable_chroot_execlog = 1;
55610 +#endif
55611 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55612 + grsec_enable_chroot_caps = 1;
55613 +#endif
55614 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55615 + grsec_enable_chroot_sysctl = 1;
55616 +#endif
55617 +#ifdef CONFIG_GRKERNSEC_TPE
55618 + grsec_enable_tpe = 1;
55619 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
55620 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55621 + grsec_enable_tpe_all = 1;
55622 +#endif
55623 +#endif
55624 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55625 + grsec_enable_socket_all = 1;
55626 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
55627 +#endif
55628 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55629 + grsec_enable_socket_client = 1;
55630 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
55631 +#endif
55632 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55633 + grsec_enable_socket_server = 1;
55634 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
55635 +#endif
55636 +#endif
55637 +
55638 + return;
55639 +}
55640 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
55641 new file mode 100644
55642 index 0000000..3efe141
55643 --- /dev/null
55644 +++ b/grsecurity/grsec_link.c
55645 @@ -0,0 +1,43 @@
55646 +#include <linux/kernel.h>
55647 +#include <linux/sched.h>
55648 +#include <linux/fs.h>
55649 +#include <linux/file.h>
55650 +#include <linux/grinternal.h>
55651 +
55652 +int
55653 +gr_handle_follow_link(const struct inode *parent,
55654 + const struct inode *inode,
55655 + const struct dentry *dentry, const struct vfsmount *mnt)
55656 +{
55657 +#ifdef CONFIG_GRKERNSEC_LINK
55658 + const struct cred *cred = current_cred();
55659 +
55660 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
55661 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
55662 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
55663 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
55664 + return -EACCES;
55665 + }
55666 +#endif
55667 + return 0;
55668 +}
55669 +
55670 +int
55671 +gr_handle_hardlink(const struct dentry *dentry,
55672 + const struct vfsmount *mnt,
55673 + struct inode *inode, const int mode, const char *to)
55674 +{
55675 +#ifdef CONFIG_GRKERNSEC_LINK
55676 + const struct cred *cred = current_cred();
55677 +
55678 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
55679 + (!S_ISREG(mode) || (mode & S_ISUID) ||
55680 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
55681 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
55682 + !capable(CAP_FOWNER) && cred->uid) {
55683 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
55684 + return -EPERM;
55685 + }
55686 +#endif
55687 + return 0;
55688 +}
55689 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
55690 new file mode 100644
55691 index 0000000..a45d2e9
55692 --- /dev/null
55693 +++ b/grsecurity/grsec_log.c
55694 @@ -0,0 +1,322 @@
55695 +#include <linux/kernel.h>
55696 +#include <linux/sched.h>
55697 +#include <linux/file.h>
55698 +#include <linux/tty.h>
55699 +#include <linux/fs.h>
55700 +#include <linux/grinternal.h>
55701 +
55702 +#ifdef CONFIG_TREE_PREEMPT_RCU
55703 +#define DISABLE_PREEMPT() preempt_disable()
55704 +#define ENABLE_PREEMPT() preempt_enable()
55705 +#else
55706 +#define DISABLE_PREEMPT()
55707 +#define ENABLE_PREEMPT()
55708 +#endif
55709 +
55710 +#define BEGIN_LOCKS(x) \
55711 + DISABLE_PREEMPT(); \
55712 + rcu_read_lock(); \
55713 + read_lock(&tasklist_lock); \
55714 + read_lock(&grsec_exec_file_lock); \
55715 + if (x != GR_DO_AUDIT) \
55716 + spin_lock(&grsec_alert_lock); \
55717 + else \
55718 + spin_lock(&grsec_audit_lock)
55719 +
55720 +#define END_LOCKS(x) \
55721 + if (x != GR_DO_AUDIT) \
55722 + spin_unlock(&grsec_alert_lock); \
55723 + else \
55724 + spin_unlock(&grsec_audit_lock); \
55725 + read_unlock(&grsec_exec_file_lock); \
55726 + read_unlock(&tasklist_lock); \
55727 + rcu_read_unlock(); \
55728 + ENABLE_PREEMPT(); \
55729 + if (x == GR_DONT_AUDIT) \
55730 + gr_handle_alertkill(current)
55731 +
55732 +enum {
55733 + FLOODING,
55734 + NO_FLOODING
55735 +};
55736 +
55737 +extern char *gr_alert_log_fmt;
55738 +extern char *gr_audit_log_fmt;
55739 +extern char *gr_alert_log_buf;
55740 +extern char *gr_audit_log_buf;
55741 +
55742 +static int gr_log_start(int audit)
55743 +{
55744 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
55745 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
55746 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55747 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
55748 + unsigned long curr_secs = get_seconds();
55749 +
55750 + if (audit == GR_DO_AUDIT)
55751 + goto set_fmt;
55752 +
55753 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
55754 + grsec_alert_wtime = curr_secs;
55755 + grsec_alert_fyet = 0;
55756 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
55757 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
55758 + grsec_alert_fyet++;
55759 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
55760 + grsec_alert_wtime = curr_secs;
55761 + grsec_alert_fyet++;
55762 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
55763 + return FLOODING;
55764 + }
55765 + else return FLOODING;
55766 +
55767 +set_fmt:
55768 +#endif
55769 + memset(buf, 0, PAGE_SIZE);
55770 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
55771 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
55772 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55773 + } else if (current->signal->curr_ip) {
55774 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
55775 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
55776 + } else if (gr_acl_is_enabled()) {
55777 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
55778 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55779 + } else {
55780 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
55781 + strcpy(buf, fmt);
55782 + }
55783 +
55784 + return NO_FLOODING;
55785 +}
55786 +
55787 +static void gr_log_middle(int audit, const char *msg, va_list ap)
55788 + __attribute__ ((format (printf, 2, 0)));
55789 +
55790 +static void gr_log_middle(int audit, const char *msg, va_list ap)
55791 +{
55792 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55793 + unsigned int len = strlen(buf);
55794 +
55795 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55796 +
55797 + return;
55798 +}
55799 +
55800 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
55801 + __attribute__ ((format (printf, 2, 3)));
55802 +
55803 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
55804 +{
55805 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55806 + unsigned int len = strlen(buf);
55807 + va_list ap;
55808 +
55809 + va_start(ap, msg);
55810 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55811 + va_end(ap);
55812 +
55813 + return;
55814 +}
55815 +
55816 +static void gr_log_end(int audit, int append_default)
55817 +{
55818 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55819 +
55820 + if (append_default) {
55821 + unsigned int len = strlen(buf);
55822 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
55823 + }
55824 +
55825 + printk("%s\n", buf);
55826 +
55827 + return;
55828 +}
55829 +
55830 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
55831 +{
55832 + int logtype;
55833 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
55834 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
55835 + void *voidptr = NULL;
55836 + int num1 = 0, num2 = 0;
55837 + unsigned long ulong1 = 0, ulong2 = 0;
55838 + struct dentry *dentry = NULL;
55839 + struct vfsmount *mnt = NULL;
55840 + struct file *file = NULL;
55841 + struct task_struct *task = NULL;
55842 + const struct cred *cred, *pcred;
55843 + va_list ap;
55844 +
55845 + BEGIN_LOCKS(audit);
55846 + logtype = gr_log_start(audit);
55847 + if (logtype == FLOODING) {
55848 + END_LOCKS(audit);
55849 + return;
55850 + }
55851 + va_start(ap, argtypes);
55852 + switch (argtypes) {
55853 + case GR_TTYSNIFF:
55854 + task = va_arg(ap, struct task_struct *);
55855 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
55856 + break;
55857 + case GR_SYSCTL_HIDDEN:
55858 + str1 = va_arg(ap, char *);
55859 + gr_log_middle_varargs(audit, msg, result, str1);
55860 + break;
55861 + case GR_RBAC:
55862 + dentry = va_arg(ap, struct dentry *);
55863 + mnt = va_arg(ap, struct vfsmount *);
55864 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
55865 + break;
55866 + case GR_RBAC_STR:
55867 + dentry = va_arg(ap, struct dentry *);
55868 + mnt = va_arg(ap, struct vfsmount *);
55869 + str1 = va_arg(ap, char *);
55870 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
55871 + break;
55872 + case GR_STR_RBAC:
55873 + str1 = va_arg(ap, char *);
55874 + dentry = va_arg(ap, struct dentry *);
55875 + mnt = va_arg(ap, struct vfsmount *);
55876 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
55877 + break;
55878 + case GR_RBAC_MODE2:
55879 + dentry = va_arg(ap, struct dentry *);
55880 + mnt = va_arg(ap, struct vfsmount *);
55881 + str1 = va_arg(ap, char *);
55882 + str2 = va_arg(ap, char *);
55883 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
55884 + break;
55885 + case GR_RBAC_MODE3:
55886 + dentry = va_arg(ap, struct dentry *);
55887 + mnt = va_arg(ap, struct vfsmount *);
55888 + str1 = va_arg(ap, char *);
55889 + str2 = va_arg(ap, char *);
55890 + str3 = va_arg(ap, char *);
55891 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
55892 + break;
55893 + case GR_FILENAME:
55894 + dentry = va_arg(ap, struct dentry *);
55895 + mnt = va_arg(ap, struct vfsmount *);
55896 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
55897 + break;
55898 + case GR_STR_FILENAME:
55899 + str1 = va_arg(ap, char *);
55900 + dentry = va_arg(ap, struct dentry *);
55901 + mnt = va_arg(ap, struct vfsmount *);
55902 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
55903 + break;
55904 + case GR_FILENAME_STR:
55905 + dentry = va_arg(ap, struct dentry *);
55906 + mnt = va_arg(ap, struct vfsmount *);
55907 + str1 = va_arg(ap, char *);
55908 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
55909 + break;
55910 + case GR_FILENAME_TWO_INT:
55911 + dentry = va_arg(ap, struct dentry *);
55912 + mnt = va_arg(ap, struct vfsmount *);
55913 + num1 = va_arg(ap, int);
55914 + num2 = va_arg(ap, int);
55915 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
55916 + break;
55917 + case GR_FILENAME_TWO_INT_STR:
55918 + dentry = va_arg(ap, struct dentry *);
55919 + mnt = va_arg(ap, struct vfsmount *);
55920 + num1 = va_arg(ap, int);
55921 + num2 = va_arg(ap, int);
55922 + str1 = va_arg(ap, char *);
55923 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
55924 + break;
55925 + case GR_TEXTREL:
55926 + file = va_arg(ap, struct file *);
55927 + ulong1 = va_arg(ap, unsigned long);
55928 + ulong2 = va_arg(ap, unsigned long);
55929 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
55930 + break;
55931 + case GR_PTRACE:
55932 + task = va_arg(ap, struct task_struct *);
55933 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
55934 + break;
55935 + case GR_RESOURCE:
55936 + task = va_arg(ap, struct task_struct *);
55937 + cred = __task_cred(task);
55938 + pcred = __task_cred(task->real_parent);
55939 + ulong1 = va_arg(ap, unsigned long);
55940 + str1 = va_arg(ap, char *);
55941 + ulong2 = va_arg(ap, unsigned long);
55942 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55943 + break;
55944 + case GR_CAP:
55945 + task = va_arg(ap, struct task_struct *);
55946 + cred = __task_cred(task);
55947 + pcred = __task_cred(task->real_parent);
55948 + str1 = va_arg(ap, char *);
55949 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55950 + break;
55951 + case GR_SIG:
55952 + str1 = va_arg(ap, char *);
55953 + voidptr = va_arg(ap, void *);
55954 + gr_log_middle_varargs(audit, msg, str1, voidptr);
55955 + break;
55956 + case GR_SIG2:
55957 + task = va_arg(ap, struct task_struct *);
55958 + cred = __task_cred(task);
55959 + pcred = __task_cred(task->real_parent);
55960 + num1 = va_arg(ap, int);
55961 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55962 + break;
55963 + case GR_CRASH1:
55964 + task = va_arg(ap, struct task_struct *);
55965 + cred = __task_cred(task);
55966 + pcred = __task_cred(task->real_parent);
55967 + ulong1 = va_arg(ap, unsigned long);
55968 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
55969 + break;
55970 + case GR_CRASH2:
55971 + task = va_arg(ap, struct task_struct *);
55972 + cred = __task_cred(task);
55973 + pcred = __task_cred(task->real_parent);
55974 + ulong1 = va_arg(ap, unsigned long);
55975 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
55976 + break;
55977 + case GR_RWXMAP:
55978 + file = va_arg(ap, struct file *);
55979 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
55980 + break;
55981 + case GR_PSACCT:
55982 + {
55983 + unsigned int wday, cday;
55984 + __u8 whr, chr;
55985 + __u8 wmin, cmin;
55986 + __u8 wsec, csec;
55987 + char cur_tty[64] = { 0 };
55988 + char parent_tty[64] = { 0 };
55989 +
55990 + task = va_arg(ap, struct task_struct *);
55991 + wday = va_arg(ap, unsigned int);
55992 + cday = va_arg(ap, unsigned int);
55993 + whr = va_arg(ap, int);
55994 + chr = va_arg(ap, int);
55995 + wmin = va_arg(ap, int);
55996 + cmin = va_arg(ap, int);
55997 + wsec = va_arg(ap, int);
55998 + csec = va_arg(ap, int);
55999 + ulong1 = va_arg(ap, unsigned long);
56000 + cred = __task_cred(task);
56001 + pcred = __task_cred(task->real_parent);
56002 +
56003 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
56004 + }
56005 + break;
56006 + default:
56007 + gr_log_middle(audit, msg, ap);
56008 + }
56009 + va_end(ap);
56010 + // these don't need DEFAULTSECARGS printed on the end
56011 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
56012 + gr_log_end(audit, 0);
56013 + else
56014 + gr_log_end(audit, 1);
56015 + END_LOCKS(audit);
56016 +}
56017 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
56018 new file mode 100644
56019 index 0000000..f536303
56020 --- /dev/null
56021 +++ b/grsecurity/grsec_mem.c
56022 @@ -0,0 +1,40 @@
56023 +#include <linux/kernel.h>
56024 +#include <linux/sched.h>
56025 +#include <linux/mm.h>
56026 +#include <linux/mman.h>
56027 +#include <linux/grinternal.h>
56028 +
56029 +void
56030 +gr_handle_ioperm(void)
56031 +{
56032 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
56033 + return;
56034 +}
56035 +
56036 +void
56037 +gr_handle_iopl(void)
56038 +{
56039 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
56040 + return;
56041 +}
56042 +
56043 +void
56044 +gr_handle_mem_readwrite(u64 from, u64 to)
56045 +{
56046 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
56047 + return;
56048 +}
56049 +
56050 +void
56051 +gr_handle_vm86(void)
56052 +{
56053 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
56054 + return;
56055 +}
56056 +
56057 +void
56058 +gr_log_badprocpid(const char *entry)
56059 +{
56060 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
56061 + return;
56062 +}
56063 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
56064 new file mode 100644
56065 index 0000000..2131422
56066 --- /dev/null
56067 +++ b/grsecurity/grsec_mount.c
56068 @@ -0,0 +1,62 @@
56069 +#include <linux/kernel.h>
56070 +#include <linux/sched.h>
56071 +#include <linux/mount.h>
56072 +#include <linux/grsecurity.h>
56073 +#include <linux/grinternal.h>
56074 +
56075 +void
56076 +gr_log_remount(const char *devname, const int retval)
56077 +{
56078 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56079 + if (grsec_enable_mount && (retval >= 0))
56080 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
56081 +#endif
56082 + return;
56083 +}
56084 +
56085 +void
56086 +gr_log_unmount(const char *devname, const int retval)
56087 +{
56088 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56089 + if (grsec_enable_mount && (retval >= 0))
56090 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
56091 +#endif
56092 + return;
56093 +}
56094 +
56095 +void
56096 +gr_log_mount(const char *from, const char *to, const int retval)
56097 +{
56098 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56099 + if (grsec_enable_mount && (retval >= 0))
56100 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
56101 +#endif
56102 + return;
56103 +}
56104 +
56105 +int
56106 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
56107 +{
56108 +#ifdef CONFIG_GRKERNSEC_ROFS
56109 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
56110 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
56111 + return -EPERM;
56112 + } else
56113 + return 0;
56114 +#endif
56115 + return 0;
56116 +}
56117 +
56118 +int
56119 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
56120 +{
56121 +#ifdef CONFIG_GRKERNSEC_ROFS
56122 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
56123 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
56124 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
56125 + return -EPERM;
56126 + } else
56127 + return 0;
56128 +#endif
56129 + return 0;
56130 +}
56131 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
56132 new file mode 100644
56133 index 0000000..a3b12a0
56134 --- /dev/null
56135 +++ b/grsecurity/grsec_pax.c
56136 @@ -0,0 +1,36 @@
56137 +#include <linux/kernel.h>
56138 +#include <linux/sched.h>
56139 +#include <linux/mm.h>
56140 +#include <linux/file.h>
56141 +#include <linux/grinternal.h>
56142 +#include <linux/grsecurity.h>
56143 +
56144 +void
56145 +gr_log_textrel(struct vm_area_struct * vma)
56146 +{
56147 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56148 + if (grsec_enable_audit_textrel)
56149 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
56150 +#endif
56151 + return;
56152 +}
56153 +
56154 +void
56155 +gr_log_rwxmmap(struct file *file)
56156 +{
56157 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56158 + if (grsec_enable_log_rwxmaps)
56159 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
56160 +#endif
56161 + return;
56162 +}
56163 +
56164 +void
56165 +gr_log_rwxmprotect(struct file *file)
56166 +{
56167 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56168 + if (grsec_enable_log_rwxmaps)
56169 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
56170 +#endif
56171 + return;
56172 +}
56173 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
56174 new file mode 100644
56175 index 0000000..f7f29aa
56176 --- /dev/null
56177 +++ b/grsecurity/grsec_ptrace.c
56178 @@ -0,0 +1,30 @@
56179 +#include <linux/kernel.h>
56180 +#include <linux/sched.h>
56181 +#include <linux/grinternal.h>
56182 +#include <linux/security.h>
56183 +
56184 +void
56185 +gr_audit_ptrace(struct task_struct *task)
56186 +{
56187 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56188 + if (grsec_enable_audit_ptrace)
56189 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
56190 +#endif
56191 + return;
56192 +}
56193 +
56194 +int
56195 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
56196 +{
56197 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56198 + const struct dentry *dentry = file->f_path.dentry;
56199 + const struct vfsmount *mnt = file->f_path.mnt;
56200 +
56201 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
56202 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
56203 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
56204 + return -EACCES;
56205 + }
56206 +#endif
56207 + return 0;
56208 +}
56209 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
56210 new file mode 100644
56211 index 0000000..7a5b2de
56212 --- /dev/null
56213 +++ b/grsecurity/grsec_sig.c
56214 @@ -0,0 +1,207 @@
56215 +#include <linux/kernel.h>
56216 +#include <linux/sched.h>
56217 +#include <linux/delay.h>
56218 +#include <linux/grsecurity.h>
56219 +#include <linux/grinternal.h>
56220 +#include <linux/hardirq.h>
56221 +
56222 +char *signames[] = {
56223 + [SIGSEGV] = "Segmentation fault",
56224 + [SIGILL] = "Illegal instruction",
56225 + [SIGABRT] = "Abort",
56226 + [SIGBUS] = "Invalid alignment/Bus error"
56227 +};
56228 +
56229 +void
56230 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
56231 +{
56232 +#ifdef CONFIG_GRKERNSEC_SIGNAL
56233 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
56234 + (sig == SIGABRT) || (sig == SIGBUS))) {
56235 + if (t->pid == current->pid) {
56236 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
56237 + } else {
56238 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
56239 + }
56240 + }
56241 +#endif
56242 + return;
56243 +}
56244 +
56245 +int
56246 +gr_handle_signal(const struct task_struct *p, const int sig)
56247 +{
56248 +#ifdef CONFIG_GRKERNSEC
56249 + /* ignore the 0 signal for protected task checks */
56250 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
56251 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
56252 + return -EPERM;
56253 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
56254 + return -EPERM;
56255 + }
56256 +#endif
56257 + return 0;
56258 +}
56259 +
56260 +#ifdef CONFIG_GRKERNSEC
56261 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
56262 +
56263 +int gr_fake_force_sig(int sig, struct task_struct *t)
56264 +{
56265 + unsigned long int flags;
56266 + int ret, blocked, ignored;
56267 + struct k_sigaction *action;
56268 +
56269 + spin_lock_irqsave(&t->sighand->siglock, flags);
56270 + action = &t->sighand->action[sig-1];
56271 + ignored = action->sa.sa_handler == SIG_IGN;
56272 + blocked = sigismember(&t->blocked, sig);
56273 + if (blocked || ignored) {
56274 + action->sa.sa_handler = SIG_DFL;
56275 + if (blocked) {
56276 + sigdelset(&t->blocked, sig);
56277 + recalc_sigpending_and_wake(t);
56278 + }
56279 + }
56280 + if (action->sa.sa_handler == SIG_DFL)
56281 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
56282 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
56283 +
56284 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
56285 +
56286 + return ret;
56287 +}
56288 +#endif
56289 +
56290 +#ifdef CONFIG_GRKERNSEC_BRUTE
56291 +#define GR_USER_BAN_TIME (15 * 60)
56292 +
56293 +static int __get_dumpable(unsigned long mm_flags)
56294 +{
56295 + int ret;
56296 +
56297 + ret = mm_flags & MMF_DUMPABLE_MASK;
56298 + return (ret >= 2) ? 2 : ret;
56299 +}
56300 +#endif
56301 +
56302 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
56303 +{
56304 +#ifdef CONFIG_GRKERNSEC_BRUTE
56305 + uid_t uid = 0;
56306 +
56307 + if (!grsec_enable_brute)
56308 + return;
56309 +
56310 + rcu_read_lock();
56311 + read_lock(&tasklist_lock);
56312 + read_lock(&grsec_exec_file_lock);
56313 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
56314 + p->real_parent->brute = 1;
56315 + else {
56316 + const struct cred *cred = __task_cred(p), *cred2;
56317 + struct task_struct *tsk, *tsk2;
56318 +
56319 + if (!__get_dumpable(mm_flags) && cred->uid) {
56320 + struct user_struct *user;
56321 +
56322 + uid = cred->uid;
56323 +
56324 + /* this is put upon execution past expiration */
56325 + user = find_user(uid);
56326 + if (user == NULL)
56327 + goto unlock;
56328 + user->banned = 1;
56329 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
56330 + if (user->ban_expires == ~0UL)
56331 + user->ban_expires--;
56332 +
56333 + do_each_thread(tsk2, tsk) {
56334 + cred2 = __task_cred(tsk);
56335 + if (tsk != p && cred2->uid == uid)
56336 + gr_fake_force_sig(SIGKILL, tsk);
56337 + } while_each_thread(tsk2, tsk);
56338 + }
56339 + }
56340 +unlock:
56341 + read_unlock(&grsec_exec_file_lock);
56342 + read_unlock(&tasklist_lock);
56343 + rcu_read_unlock();
56344 +
56345 + if (uid)
56346 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
56347 +
56348 +#endif
56349 + return;
56350 +}
56351 +
56352 +void gr_handle_brute_check(void)
56353 +{
56354 +#ifdef CONFIG_GRKERNSEC_BRUTE
56355 + if (current->brute)
56356 + msleep(30 * 1000);
56357 +#endif
56358 + return;
56359 +}
56360 +
56361 +void gr_handle_kernel_exploit(void)
56362 +{
56363 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
56364 + const struct cred *cred;
56365 + struct task_struct *tsk, *tsk2;
56366 + struct user_struct *user;
56367 + uid_t uid;
56368 +
56369 + if (in_irq() || in_serving_softirq() || in_nmi())
56370 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
56371 +
56372 + uid = current_uid();
56373 +
56374 + if (uid == 0)
56375 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
56376 + else {
56377 + /* kill all the processes of this user, hold a reference
56378 + to their creds struct, and prevent them from creating
56379 + another process until system reset
56380 + */
56381 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
56382 + /* we intentionally leak this ref */
56383 + user = get_uid(current->cred->user);
56384 + if (user) {
56385 + user->banned = 1;
56386 + user->ban_expires = ~0UL;
56387 + }
56388 +
56389 + read_lock(&tasklist_lock);
56390 + do_each_thread(tsk2, tsk) {
56391 + cred = __task_cred(tsk);
56392 + if (cred->uid == uid)
56393 + gr_fake_force_sig(SIGKILL, tsk);
56394 + } while_each_thread(tsk2, tsk);
56395 + read_unlock(&tasklist_lock);
56396 + }
56397 +#endif
56398 +}
56399 +
56400 +int __gr_process_user_ban(struct user_struct *user)
56401 +{
56402 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56403 + if (unlikely(user->banned)) {
56404 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
56405 + user->banned = 0;
56406 + user->ban_expires = 0;
56407 + free_uid(user);
56408 + } else
56409 + return -EPERM;
56410 + }
56411 +#endif
56412 + return 0;
56413 +}
56414 +
56415 +int gr_process_user_ban(void)
56416 +{
56417 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56418 + return __gr_process_user_ban(current->cred->user);
56419 +#endif
56420 + return 0;
56421 +}
56422 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
56423 new file mode 100644
56424 index 0000000..4030d57
56425 --- /dev/null
56426 +++ b/grsecurity/grsec_sock.c
56427 @@ -0,0 +1,244 @@
56428 +#include <linux/kernel.h>
56429 +#include <linux/module.h>
56430 +#include <linux/sched.h>
56431 +#include <linux/file.h>
56432 +#include <linux/net.h>
56433 +#include <linux/in.h>
56434 +#include <linux/ip.h>
56435 +#include <net/sock.h>
56436 +#include <net/inet_sock.h>
56437 +#include <linux/grsecurity.h>
56438 +#include <linux/grinternal.h>
56439 +#include <linux/gracl.h>
56440 +
56441 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
56442 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
56443 +
56444 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
56445 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
56446 +
56447 +#ifdef CONFIG_UNIX_MODULE
56448 +EXPORT_SYMBOL(gr_acl_handle_unix);
56449 +EXPORT_SYMBOL(gr_acl_handle_mknod);
56450 +EXPORT_SYMBOL(gr_handle_chroot_unix);
56451 +EXPORT_SYMBOL(gr_handle_create);
56452 +#endif
56453 +
56454 +#ifdef CONFIG_GRKERNSEC
56455 +#define gr_conn_table_size 32749
56456 +struct conn_table_entry {
56457 + struct conn_table_entry *next;
56458 + struct signal_struct *sig;
56459 +};
56460 +
56461 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
56462 +DEFINE_SPINLOCK(gr_conn_table_lock);
56463 +
56464 +extern const char * gr_socktype_to_name(unsigned char type);
56465 +extern const char * gr_proto_to_name(unsigned char proto);
56466 +extern const char * gr_sockfamily_to_name(unsigned char family);
56467 +
56468 +static __inline__ int
56469 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
56470 +{
56471 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
56472 +}
56473 +
56474 +static __inline__ int
56475 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
56476 + __u16 sport, __u16 dport)
56477 +{
56478 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
56479 + sig->gr_sport == sport && sig->gr_dport == dport))
56480 + return 1;
56481 + else
56482 + return 0;
56483 +}
56484 +
56485 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
56486 +{
56487 + struct conn_table_entry **match;
56488 + unsigned int index;
56489 +
56490 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56491 + sig->gr_sport, sig->gr_dport,
56492 + gr_conn_table_size);
56493 +
56494 + newent->sig = sig;
56495 +
56496 + match = &gr_conn_table[index];
56497 + newent->next = *match;
56498 + *match = newent;
56499 +
56500 + return;
56501 +}
56502 +
56503 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
56504 +{
56505 + struct conn_table_entry *match, *last = NULL;
56506 + unsigned int index;
56507 +
56508 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56509 + sig->gr_sport, sig->gr_dport,
56510 + gr_conn_table_size);
56511 +
56512 + match = gr_conn_table[index];
56513 + while (match && !conn_match(match->sig,
56514 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
56515 + sig->gr_dport)) {
56516 + last = match;
56517 + match = match->next;
56518 + }
56519 +
56520 + if (match) {
56521 + if (last)
56522 + last->next = match->next;
56523 + else
56524 + gr_conn_table[index] = NULL;
56525 + kfree(match);
56526 + }
56527 +
56528 + return;
56529 +}
56530 +
56531 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
56532 + __u16 sport, __u16 dport)
56533 +{
56534 + struct conn_table_entry *match;
56535 + unsigned int index;
56536 +
56537 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
56538 +
56539 + match = gr_conn_table[index];
56540 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
56541 + match = match->next;
56542 +
56543 + if (match)
56544 + return match->sig;
56545 + else
56546 + return NULL;
56547 +}
56548 +
56549 +#endif
56550 +
56551 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
56552 +{
56553 +#ifdef CONFIG_GRKERNSEC
56554 + struct signal_struct *sig = task->signal;
56555 + struct conn_table_entry *newent;
56556 +
56557 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
56558 + if (newent == NULL)
56559 + return;
56560 + /* no bh lock needed since we are called with bh disabled */
56561 + spin_lock(&gr_conn_table_lock);
56562 + gr_del_task_from_ip_table_nolock(sig);
56563 + sig->gr_saddr = inet->inet_rcv_saddr;
56564 + sig->gr_daddr = inet->inet_daddr;
56565 + sig->gr_sport = inet->inet_sport;
56566 + sig->gr_dport = inet->inet_dport;
56567 + gr_add_to_task_ip_table_nolock(sig, newent);
56568 + spin_unlock(&gr_conn_table_lock);
56569 +#endif
56570 + return;
56571 +}
56572 +
56573 +void gr_del_task_from_ip_table(struct task_struct *task)
56574 +{
56575 +#ifdef CONFIG_GRKERNSEC
56576 + spin_lock_bh(&gr_conn_table_lock);
56577 + gr_del_task_from_ip_table_nolock(task->signal);
56578 + spin_unlock_bh(&gr_conn_table_lock);
56579 +#endif
56580 + return;
56581 +}
56582 +
56583 +void
56584 +gr_attach_curr_ip(const struct sock *sk)
56585 +{
56586 +#ifdef CONFIG_GRKERNSEC
56587 + struct signal_struct *p, *set;
56588 + const struct inet_sock *inet = inet_sk(sk);
56589 +
56590 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
56591 + return;
56592 +
56593 + set = current->signal;
56594 +
56595 + spin_lock_bh(&gr_conn_table_lock);
56596 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
56597 + inet->inet_dport, inet->inet_sport);
56598 + if (unlikely(p != NULL)) {
56599 + set->curr_ip = p->curr_ip;
56600 + set->used_accept = 1;
56601 + gr_del_task_from_ip_table_nolock(p);
56602 + spin_unlock_bh(&gr_conn_table_lock);
56603 + return;
56604 + }
56605 + spin_unlock_bh(&gr_conn_table_lock);
56606 +
56607 + set->curr_ip = inet->inet_daddr;
56608 + set->used_accept = 1;
56609 +#endif
56610 + return;
56611 +}
56612 +
56613 +int
56614 +gr_handle_sock_all(const int family, const int type, const int protocol)
56615 +{
56616 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56617 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
56618 + (family != AF_UNIX)) {
56619 + if (family == AF_INET)
56620 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
56621 + else
56622 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
56623 + return -EACCES;
56624 + }
56625 +#endif
56626 + return 0;
56627 +}
56628 +
56629 +int
56630 +gr_handle_sock_server(const struct sockaddr *sck)
56631 +{
56632 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56633 + if (grsec_enable_socket_server &&
56634 + in_group_p(grsec_socket_server_gid) &&
56635 + sck && (sck->sa_family != AF_UNIX) &&
56636 + (sck->sa_family != AF_LOCAL)) {
56637 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56638 + return -EACCES;
56639 + }
56640 +#endif
56641 + return 0;
56642 +}
56643 +
56644 +int
56645 +gr_handle_sock_server_other(const struct sock *sck)
56646 +{
56647 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56648 + if (grsec_enable_socket_server &&
56649 + in_group_p(grsec_socket_server_gid) &&
56650 + sck && (sck->sk_family != AF_UNIX) &&
56651 + (sck->sk_family != AF_LOCAL)) {
56652 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56653 + return -EACCES;
56654 + }
56655 +#endif
56656 + return 0;
56657 +}
56658 +
56659 +int
56660 +gr_handle_sock_client(const struct sockaddr *sck)
56661 +{
56662 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56663 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
56664 + sck && (sck->sa_family != AF_UNIX) &&
56665 + (sck->sa_family != AF_LOCAL)) {
56666 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
56667 + return -EACCES;
56668 + }
56669 +#endif
56670 + return 0;
56671 +}
56672 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
56673 new file mode 100644
56674 index 0000000..a1aedd7
56675 --- /dev/null
56676 +++ b/grsecurity/grsec_sysctl.c
56677 @@ -0,0 +1,451 @@
56678 +#include <linux/kernel.h>
56679 +#include <linux/sched.h>
56680 +#include <linux/sysctl.h>
56681 +#include <linux/grsecurity.h>
56682 +#include <linux/grinternal.h>
56683 +
56684 +int
56685 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
56686 +{
56687 +#ifdef CONFIG_GRKERNSEC_SYSCTL
56688 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
56689 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
56690 + return -EACCES;
56691 + }
56692 +#endif
56693 + return 0;
56694 +}
56695 +
56696 +#ifdef CONFIG_GRKERNSEC_ROFS
56697 +static int __maybe_unused one = 1;
56698 +#endif
56699 +
56700 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
56701 +struct ctl_table grsecurity_table[] = {
56702 +#ifdef CONFIG_GRKERNSEC_SYSCTL
56703 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
56704 +#ifdef CONFIG_GRKERNSEC_IO
56705 + {
56706 + .procname = "disable_priv_io",
56707 + .data = &grsec_disable_privio,
56708 + .maxlen = sizeof(int),
56709 + .mode = 0600,
56710 + .proc_handler = &proc_dointvec,
56711 + },
56712 +#endif
56713 +#endif
56714 +#ifdef CONFIG_GRKERNSEC_LINK
56715 + {
56716 + .procname = "linking_restrictions",
56717 + .data = &grsec_enable_link,
56718 + .maxlen = sizeof(int),
56719 + .mode = 0600,
56720 + .proc_handler = &proc_dointvec,
56721 + },
56722 +#endif
56723 +#ifdef CONFIG_GRKERNSEC_BRUTE
56724 + {
56725 + .procname = "deter_bruteforce",
56726 + .data = &grsec_enable_brute,
56727 + .maxlen = sizeof(int),
56728 + .mode = 0600,
56729 + .proc_handler = &proc_dointvec,
56730 + },
56731 +#endif
56732 +#ifdef CONFIG_GRKERNSEC_FIFO
56733 + {
56734 + .procname = "fifo_restrictions",
56735 + .data = &grsec_enable_fifo,
56736 + .maxlen = sizeof(int),
56737 + .mode = 0600,
56738 + .proc_handler = &proc_dointvec,
56739 + },
56740 +#endif
56741 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56742 + {
56743 + .procname = "ptrace_readexec",
56744 + .data = &grsec_enable_ptrace_readexec,
56745 + .maxlen = sizeof(int),
56746 + .mode = 0600,
56747 + .proc_handler = &proc_dointvec,
56748 + },
56749 +#endif
56750 +#ifdef CONFIG_GRKERNSEC_SETXID
56751 + {
56752 + .procname = "consistent_setxid",
56753 + .data = &grsec_enable_setxid,
56754 + .maxlen = sizeof(int),
56755 + .mode = 0600,
56756 + .proc_handler = &proc_dointvec,
56757 + },
56758 +#endif
56759 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56760 + {
56761 + .procname = "ip_blackhole",
56762 + .data = &grsec_enable_blackhole,
56763 + .maxlen = sizeof(int),
56764 + .mode = 0600,
56765 + .proc_handler = &proc_dointvec,
56766 + },
56767 + {
56768 + .procname = "lastack_retries",
56769 + .data = &grsec_lastack_retries,
56770 + .maxlen = sizeof(int),
56771 + .mode = 0600,
56772 + .proc_handler = &proc_dointvec,
56773 + },
56774 +#endif
56775 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56776 + {
56777 + .procname = "exec_logging",
56778 + .data = &grsec_enable_execlog,
56779 + .maxlen = sizeof(int),
56780 + .mode = 0600,
56781 + .proc_handler = &proc_dointvec,
56782 + },
56783 +#endif
56784 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56785 + {
56786 + .procname = "rwxmap_logging",
56787 + .data = &grsec_enable_log_rwxmaps,
56788 + .maxlen = sizeof(int),
56789 + .mode = 0600,
56790 + .proc_handler = &proc_dointvec,
56791 + },
56792 +#endif
56793 +#ifdef CONFIG_GRKERNSEC_SIGNAL
56794 + {
56795 + .procname = "signal_logging",
56796 + .data = &grsec_enable_signal,
56797 + .maxlen = sizeof(int),
56798 + .mode = 0600,
56799 + .proc_handler = &proc_dointvec,
56800 + },
56801 +#endif
56802 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56803 + {
56804 + .procname = "forkfail_logging",
56805 + .data = &grsec_enable_forkfail,
56806 + .maxlen = sizeof(int),
56807 + .mode = 0600,
56808 + .proc_handler = &proc_dointvec,
56809 + },
56810 +#endif
56811 +#ifdef CONFIG_GRKERNSEC_TIME
56812 + {
56813 + .procname = "timechange_logging",
56814 + .data = &grsec_enable_time,
56815 + .maxlen = sizeof(int),
56816 + .mode = 0600,
56817 + .proc_handler = &proc_dointvec,
56818 + },
56819 +#endif
56820 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56821 + {
56822 + .procname = "chroot_deny_shmat",
56823 + .data = &grsec_enable_chroot_shmat,
56824 + .maxlen = sizeof(int),
56825 + .mode = 0600,
56826 + .proc_handler = &proc_dointvec,
56827 + },
56828 +#endif
56829 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56830 + {
56831 + .procname = "chroot_deny_unix",
56832 + .data = &grsec_enable_chroot_unix,
56833 + .maxlen = sizeof(int),
56834 + .mode = 0600,
56835 + .proc_handler = &proc_dointvec,
56836 + },
56837 +#endif
56838 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56839 + {
56840 + .procname = "chroot_deny_mount",
56841 + .data = &grsec_enable_chroot_mount,
56842 + .maxlen = sizeof(int),
56843 + .mode = 0600,
56844 + .proc_handler = &proc_dointvec,
56845 + },
56846 +#endif
56847 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56848 + {
56849 + .procname = "chroot_deny_fchdir",
56850 + .data = &grsec_enable_chroot_fchdir,
56851 + .maxlen = sizeof(int),
56852 + .mode = 0600,
56853 + .proc_handler = &proc_dointvec,
56854 + },
56855 +#endif
56856 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56857 + {
56858 + .procname = "chroot_deny_chroot",
56859 + .data = &grsec_enable_chroot_double,
56860 + .maxlen = sizeof(int),
56861 + .mode = 0600,
56862 + .proc_handler = &proc_dointvec,
56863 + },
56864 +#endif
56865 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56866 + {
56867 + .procname = "chroot_deny_pivot",
56868 + .data = &grsec_enable_chroot_pivot,
56869 + .maxlen = sizeof(int),
56870 + .mode = 0600,
56871 + .proc_handler = &proc_dointvec,
56872 + },
56873 +#endif
56874 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56875 + {
56876 + .procname = "chroot_enforce_chdir",
56877 + .data = &grsec_enable_chroot_chdir,
56878 + .maxlen = sizeof(int),
56879 + .mode = 0600,
56880 + .proc_handler = &proc_dointvec,
56881 + },
56882 +#endif
56883 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56884 + {
56885 + .procname = "chroot_deny_chmod",
56886 + .data = &grsec_enable_chroot_chmod,
56887 + .maxlen = sizeof(int),
56888 + .mode = 0600,
56889 + .proc_handler = &proc_dointvec,
56890 + },
56891 +#endif
56892 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56893 + {
56894 + .procname = "chroot_deny_mknod",
56895 + .data = &grsec_enable_chroot_mknod,
56896 + .maxlen = sizeof(int),
56897 + .mode = 0600,
56898 + .proc_handler = &proc_dointvec,
56899 + },
56900 +#endif
56901 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56902 + {
56903 + .procname = "chroot_restrict_nice",
56904 + .data = &grsec_enable_chroot_nice,
56905 + .maxlen = sizeof(int),
56906 + .mode = 0600,
56907 + .proc_handler = &proc_dointvec,
56908 + },
56909 +#endif
56910 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56911 + {
56912 + .procname = "chroot_execlog",
56913 + .data = &grsec_enable_chroot_execlog,
56914 + .maxlen = sizeof(int),
56915 + .mode = 0600,
56916 + .proc_handler = &proc_dointvec,
56917 + },
56918 +#endif
56919 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56920 + {
56921 + .procname = "chroot_caps",
56922 + .data = &grsec_enable_chroot_caps,
56923 + .maxlen = sizeof(int),
56924 + .mode = 0600,
56925 + .proc_handler = &proc_dointvec,
56926 + },
56927 +#endif
56928 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56929 + {
56930 + .procname = "chroot_deny_sysctl",
56931 + .data = &grsec_enable_chroot_sysctl,
56932 + .maxlen = sizeof(int),
56933 + .mode = 0600,
56934 + .proc_handler = &proc_dointvec,
56935 + },
56936 +#endif
56937 +#ifdef CONFIG_GRKERNSEC_TPE
56938 + {
56939 + .procname = "tpe",
56940 + .data = &grsec_enable_tpe,
56941 + .maxlen = sizeof(int),
56942 + .mode = 0600,
56943 + .proc_handler = &proc_dointvec,
56944 + },
56945 + {
56946 + .procname = "tpe_gid",
56947 + .data = &grsec_tpe_gid,
56948 + .maxlen = sizeof(int),
56949 + .mode = 0600,
56950 + .proc_handler = &proc_dointvec,
56951 + },
56952 +#endif
56953 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56954 + {
56955 + .procname = "tpe_invert",
56956 + .data = &grsec_enable_tpe_invert,
56957 + .maxlen = sizeof(int),
56958 + .mode = 0600,
56959 + .proc_handler = &proc_dointvec,
56960 + },
56961 +#endif
56962 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
56963 + {
56964 + .procname = "tpe_restrict_all",
56965 + .data = &grsec_enable_tpe_all,
56966 + .maxlen = sizeof(int),
56967 + .mode = 0600,
56968 + .proc_handler = &proc_dointvec,
56969 + },
56970 +#endif
56971 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56972 + {
56973 + .procname = "socket_all",
56974 + .data = &grsec_enable_socket_all,
56975 + .maxlen = sizeof(int),
56976 + .mode = 0600,
56977 + .proc_handler = &proc_dointvec,
56978 + },
56979 + {
56980 + .procname = "socket_all_gid",
56981 + .data = &grsec_socket_all_gid,
56982 + .maxlen = sizeof(int),
56983 + .mode = 0600,
56984 + .proc_handler = &proc_dointvec,
56985 + },
56986 +#endif
56987 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56988 + {
56989 + .procname = "socket_client",
56990 + .data = &grsec_enable_socket_client,
56991 + .maxlen = sizeof(int),
56992 + .mode = 0600,
56993 + .proc_handler = &proc_dointvec,
56994 + },
56995 + {
56996 + .procname = "socket_client_gid",
56997 + .data = &grsec_socket_client_gid,
56998 + .maxlen = sizeof(int),
56999 + .mode = 0600,
57000 + .proc_handler = &proc_dointvec,
57001 + },
57002 +#endif
57003 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57004 + {
57005 + .procname = "socket_server",
57006 + .data = &grsec_enable_socket_server,
57007 + .maxlen = sizeof(int),
57008 + .mode = 0600,
57009 + .proc_handler = &proc_dointvec,
57010 + },
57011 + {
57012 + .procname = "socket_server_gid",
57013 + .data = &grsec_socket_server_gid,
57014 + .maxlen = sizeof(int),
57015 + .mode = 0600,
57016 + .proc_handler = &proc_dointvec,
57017 + },
57018 +#endif
57019 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57020 + {
57021 + .procname = "audit_group",
57022 + .data = &grsec_enable_group,
57023 + .maxlen = sizeof(int),
57024 + .mode = 0600,
57025 + .proc_handler = &proc_dointvec,
57026 + },
57027 + {
57028 + .procname = "audit_gid",
57029 + .data = &grsec_audit_gid,
57030 + .maxlen = sizeof(int),
57031 + .mode = 0600,
57032 + .proc_handler = &proc_dointvec,
57033 + },
57034 +#endif
57035 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57036 + {
57037 + .procname = "audit_chdir",
57038 + .data = &grsec_enable_chdir,
57039 + .maxlen = sizeof(int),
57040 + .mode = 0600,
57041 + .proc_handler = &proc_dointvec,
57042 + },
57043 +#endif
57044 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57045 + {
57046 + .procname = "audit_mount",
57047 + .data = &grsec_enable_mount,
57048 + .maxlen = sizeof(int),
57049 + .mode = 0600,
57050 + .proc_handler = &proc_dointvec,
57051 + },
57052 +#endif
57053 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57054 + {
57055 + .procname = "audit_textrel",
57056 + .data = &grsec_enable_audit_textrel,
57057 + .maxlen = sizeof(int),
57058 + .mode = 0600,
57059 + .proc_handler = &proc_dointvec,
57060 + },
57061 +#endif
57062 +#ifdef CONFIG_GRKERNSEC_DMESG
57063 + {
57064 + .procname = "dmesg",
57065 + .data = &grsec_enable_dmesg,
57066 + .maxlen = sizeof(int),
57067 + .mode = 0600,
57068 + .proc_handler = &proc_dointvec,
57069 + },
57070 +#endif
57071 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57072 + {
57073 + .procname = "chroot_findtask",
57074 + .data = &grsec_enable_chroot_findtask,
57075 + .maxlen = sizeof(int),
57076 + .mode = 0600,
57077 + .proc_handler = &proc_dointvec,
57078 + },
57079 +#endif
57080 +#ifdef CONFIG_GRKERNSEC_RESLOG
57081 + {
57082 + .procname = "resource_logging",
57083 + .data = &grsec_resource_logging,
57084 + .maxlen = sizeof(int),
57085 + .mode = 0600,
57086 + .proc_handler = &proc_dointvec,
57087 + },
57088 +#endif
57089 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57090 + {
57091 + .procname = "audit_ptrace",
57092 + .data = &grsec_enable_audit_ptrace,
57093 + .maxlen = sizeof(int),
57094 + .mode = 0600,
57095 + .proc_handler = &proc_dointvec,
57096 + },
57097 +#endif
57098 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57099 + {
57100 + .procname = "harden_ptrace",
57101 + .data = &grsec_enable_harden_ptrace,
57102 + .maxlen = sizeof(int),
57103 + .mode = 0600,
57104 + .proc_handler = &proc_dointvec,
57105 + },
57106 +#endif
57107 + {
57108 + .procname = "grsec_lock",
57109 + .data = &grsec_lock,
57110 + .maxlen = sizeof(int),
57111 + .mode = 0600,
57112 + .proc_handler = &proc_dointvec,
57113 + },
57114 +#endif
57115 +#ifdef CONFIG_GRKERNSEC_ROFS
57116 + {
57117 + .procname = "romount_protect",
57118 + .data = &grsec_enable_rofs,
57119 + .maxlen = sizeof(int),
57120 + .mode = 0600,
57121 + .proc_handler = &proc_dointvec_minmax,
57122 + .extra1 = &one,
57123 + .extra2 = &one,
57124 + },
57125 +#endif
57126 + { }
57127 +};
57128 +#endif
57129 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
57130 new file mode 100644
57131 index 0000000..0dc13c3
57132 --- /dev/null
57133 +++ b/grsecurity/grsec_time.c
57134 @@ -0,0 +1,16 @@
57135 +#include <linux/kernel.h>
57136 +#include <linux/sched.h>
57137 +#include <linux/grinternal.h>
57138 +#include <linux/module.h>
57139 +
57140 +void
57141 +gr_log_timechange(void)
57142 +{
57143 +#ifdef CONFIG_GRKERNSEC_TIME
57144 + if (grsec_enable_time)
57145 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
57146 +#endif
57147 + return;
57148 +}
57149 +
57150 +EXPORT_SYMBOL(gr_log_timechange);
57151 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
57152 new file mode 100644
57153 index 0000000..07e0dc0
57154 --- /dev/null
57155 +++ b/grsecurity/grsec_tpe.c
57156 @@ -0,0 +1,73 @@
57157 +#include <linux/kernel.h>
57158 +#include <linux/sched.h>
57159 +#include <linux/file.h>
57160 +#include <linux/fs.h>
57161 +#include <linux/grinternal.h>
57162 +
57163 +extern int gr_acl_tpe_check(void);
57164 +
57165 +int
57166 +gr_tpe_allow(const struct file *file)
57167 +{
57168 +#ifdef CONFIG_GRKERNSEC
57169 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
57170 + const struct cred *cred = current_cred();
57171 + char *msg = NULL;
57172 + char *msg2 = NULL;
57173 +
57174 + // never restrict root
57175 + if (!cred->uid)
57176 + return 1;
57177 +
57178 + if (grsec_enable_tpe) {
57179 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57180 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
57181 + msg = "not being in trusted group";
57182 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
57183 + msg = "being in untrusted group";
57184 +#else
57185 + if (in_group_p(grsec_tpe_gid))
57186 + msg = "being in untrusted group";
57187 +#endif
57188 + }
57189 + if (!msg && gr_acl_tpe_check())
57190 + msg = "being in untrusted role";
57191 +
57192 + // not in any affected group/role
57193 + if (!msg)
57194 + goto next_check;
57195 +
57196 + if (inode->i_uid)
57197 + msg2 = "file in non-root-owned directory";
57198 + else if (inode->i_mode & S_IWOTH)
57199 + msg2 = "file in world-writable directory";
57200 + else if (inode->i_mode & S_IWGRP)
57201 + msg2 = "file in group-writable directory";
57202 +
57203 + if (msg && msg2) {
57204 + char fullmsg[70] = {0};
57205 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
57206 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
57207 + return 0;
57208 + }
57209 + msg = NULL;
57210 +next_check:
57211 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
57212 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
57213 + return 1;
57214 +
57215 + if (inode->i_uid && (inode->i_uid != cred->uid))
57216 + msg = "directory not owned by user";
57217 + else if (inode->i_mode & S_IWOTH)
57218 + msg = "file in world-writable directory";
57219 + else if (inode->i_mode & S_IWGRP)
57220 + msg = "file in group-writable directory";
57221 +
57222 + if (msg) {
57223 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
57224 + return 0;
57225 + }
57226 +#endif
57227 +#endif
57228 + return 1;
57229 +}
57230 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
57231 new file mode 100644
57232 index 0000000..9f7b1ac
57233 --- /dev/null
57234 +++ b/grsecurity/grsum.c
57235 @@ -0,0 +1,61 @@
57236 +#include <linux/err.h>
57237 +#include <linux/kernel.h>
57238 +#include <linux/sched.h>
57239 +#include <linux/mm.h>
57240 +#include <linux/scatterlist.h>
57241 +#include <linux/crypto.h>
57242 +#include <linux/gracl.h>
57243 +
57244 +
57245 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
57246 +#error "crypto and sha256 must be built into the kernel"
57247 +#endif
57248 +
57249 +int
57250 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
57251 +{
57252 + char *p;
57253 + struct crypto_hash *tfm;
57254 + struct hash_desc desc;
57255 + struct scatterlist sg;
57256 + unsigned char temp_sum[GR_SHA_LEN];
57257 + volatile int retval = 0;
57258 + volatile int dummy = 0;
57259 + unsigned int i;
57260 +
57261 + sg_init_table(&sg, 1);
57262 +
57263 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
57264 + if (IS_ERR(tfm)) {
57265 + /* should never happen, since sha256 should be built in */
57266 + return 1;
57267 + }
57268 +
57269 + desc.tfm = tfm;
57270 + desc.flags = 0;
57271 +
57272 + crypto_hash_init(&desc);
57273 +
57274 + p = salt;
57275 + sg_set_buf(&sg, p, GR_SALT_LEN);
57276 + crypto_hash_update(&desc, &sg, sg.length);
57277 +
57278 + p = entry->pw;
57279 + sg_set_buf(&sg, p, strlen(p));
57280 +
57281 + crypto_hash_update(&desc, &sg, sg.length);
57282 +
57283 + crypto_hash_final(&desc, temp_sum);
57284 +
57285 + memset(entry->pw, 0, GR_PW_LEN);
57286 +
57287 + for (i = 0; i < GR_SHA_LEN; i++)
57288 + if (sum[i] != temp_sum[i])
57289 + retval = 1;
57290 + else
57291 + dummy = 1; // waste a cycle
57292 +
57293 + crypto_free_hash(tfm);
57294 +
57295 + return retval;
57296 +}
57297 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
57298 index 6cd5b64..f620d2d 100644
57299 --- a/include/acpi/acpi_bus.h
57300 +++ b/include/acpi/acpi_bus.h
57301 @@ -107,7 +107,7 @@ struct acpi_device_ops {
57302 acpi_op_bind bind;
57303 acpi_op_unbind unbind;
57304 acpi_op_notify notify;
57305 -};
57306 +} __no_const;
57307
57308 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57309
57310 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
57311 index b7babf0..71e4e74 100644
57312 --- a/include/asm-generic/atomic-long.h
57313 +++ b/include/asm-generic/atomic-long.h
57314 @@ -22,6 +22,12 @@
57315
57316 typedef atomic64_t atomic_long_t;
57317
57318 +#ifdef CONFIG_PAX_REFCOUNT
57319 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
57320 +#else
57321 +typedef atomic64_t atomic_long_unchecked_t;
57322 +#endif
57323 +
57324 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57325
57326 static inline long atomic_long_read(atomic_long_t *l)
57327 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57328 return (long)atomic64_read(v);
57329 }
57330
57331 +#ifdef CONFIG_PAX_REFCOUNT
57332 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57333 +{
57334 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57335 +
57336 + return (long)atomic64_read_unchecked(v);
57337 +}
57338 +#endif
57339 +
57340 static inline void atomic_long_set(atomic_long_t *l, long i)
57341 {
57342 atomic64_t *v = (atomic64_t *)l;
57343 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57344 atomic64_set(v, i);
57345 }
57346
57347 +#ifdef CONFIG_PAX_REFCOUNT
57348 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57349 +{
57350 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57351 +
57352 + atomic64_set_unchecked(v, i);
57353 +}
57354 +#endif
57355 +
57356 static inline void atomic_long_inc(atomic_long_t *l)
57357 {
57358 atomic64_t *v = (atomic64_t *)l;
57359 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57360 atomic64_inc(v);
57361 }
57362
57363 +#ifdef CONFIG_PAX_REFCOUNT
57364 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57365 +{
57366 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57367 +
57368 + atomic64_inc_unchecked(v);
57369 +}
57370 +#endif
57371 +
57372 static inline void atomic_long_dec(atomic_long_t *l)
57373 {
57374 atomic64_t *v = (atomic64_t *)l;
57375 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57376 atomic64_dec(v);
57377 }
57378
57379 +#ifdef CONFIG_PAX_REFCOUNT
57380 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57381 +{
57382 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57383 +
57384 + atomic64_dec_unchecked(v);
57385 +}
57386 +#endif
57387 +
57388 static inline void atomic_long_add(long i, atomic_long_t *l)
57389 {
57390 atomic64_t *v = (atomic64_t *)l;
57391 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57392 atomic64_add(i, v);
57393 }
57394
57395 +#ifdef CONFIG_PAX_REFCOUNT
57396 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57397 +{
57398 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57399 +
57400 + atomic64_add_unchecked(i, v);
57401 +}
57402 +#endif
57403 +
57404 static inline void atomic_long_sub(long i, atomic_long_t *l)
57405 {
57406 atomic64_t *v = (atomic64_t *)l;
57407 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57408 atomic64_sub(i, v);
57409 }
57410
57411 +#ifdef CONFIG_PAX_REFCOUNT
57412 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57413 +{
57414 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57415 +
57416 + atomic64_sub_unchecked(i, v);
57417 +}
57418 +#endif
57419 +
57420 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57421 {
57422 atomic64_t *v = (atomic64_t *)l;
57423 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57424 return (long)atomic64_inc_return(v);
57425 }
57426
57427 +#ifdef CONFIG_PAX_REFCOUNT
57428 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57429 +{
57430 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57431 +
57432 + return (long)atomic64_inc_return_unchecked(v);
57433 +}
57434 +#endif
57435 +
57436 static inline long atomic_long_dec_return(atomic_long_t *l)
57437 {
57438 atomic64_t *v = (atomic64_t *)l;
57439 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57440
57441 typedef atomic_t atomic_long_t;
57442
57443 +#ifdef CONFIG_PAX_REFCOUNT
57444 +typedef atomic_unchecked_t atomic_long_unchecked_t;
57445 +#else
57446 +typedef atomic_t atomic_long_unchecked_t;
57447 +#endif
57448 +
57449 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57450 static inline long atomic_long_read(atomic_long_t *l)
57451 {
57452 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57453 return (long)atomic_read(v);
57454 }
57455
57456 +#ifdef CONFIG_PAX_REFCOUNT
57457 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57458 +{
57459 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57460 +
57461 + return (long)atomic_read_unchecked(v);
57462 +}
57463 +#endif
57464 +
57465 static inline void atomic_long_set(atomic_long_t *l, long i)
57466 {
57467 atomic_t *v = (atomic_t *)l;
57468 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57469 atomic_set(v, i);
57470 }
57471
57472 +#ifdef CONFIG_PAX_REFCOUNT
57473 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57474 +{
57475 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57476 +
57477 + atomic_set_unchecked(v, i);
57478 +}
57479 +#endif
57480 +
57481 static inline void atomic_long_inc(atomic_long_t *l)
57482 {
57483 atomic_t *v = (atomic_t *)l;
57484 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57485 atomic_inc(v);
57486 }
57487
57488 +#ifdef CONFIG_PAX_REFCOUNT
57489 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57490 +{
57491 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57492 +
57493 + atomic_inc_unchecked(v);
57494 +}
57495 +#endif
57496 +
57497 static inline void atomic_long_dec(atomic_long_t *l)
57498 {
57499 atomic_t *v = (atomic_t *)l;
57500 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57501 atomic_dec(v);
57502 }
57503
57504 +#ifdef CONFIG_PAX_REFCOUNT
57505 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57506 +{
57507 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57508 +
57509 + atomic_dec_unchecked(v);
57510 +}
57511 +#endif
57512 +
57513 static inline void atomic_long_add(long i, atomic_long_t *l)
57514 {
57515 atomic_t *v = (atomic_t *)l;
57516 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57517 atomic_add(i, v);
57518 }
57519
57520 +#ifdef CONFIG_PAX_REFCOUNT
57521 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57522 +{
57523 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57524 +
57525 + atomic_add_unchecked(i, v);
57526 +}
57527 +#endif
57528 +
57529 static inline void atomic_long_sub(long i, atomic_long_t *l)
57530 {
57531 atomic_t *v = (atomic_t *)l;
57532 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57533 atomic_sub(i, v);
57534 }
57535
57536 +#ifdef CONFIG_PAX_REFCOUNT
57537 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57538 +{
57539 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57540 +
57541 + atomic_sub_unchecked(i, v);
57542 +}
57543 +#endif
57544 +
57545 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57546 {
57547 atomic_t *v = (atomic_t *)l;
57548 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57549 return (long)atomic_inc_return(v);
57550 }
57551
57552 +#ifdef CONFIG_PAX_REFCOUNT
57553 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57554 +{
57555 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57556 +
57557 + return (long)atomic_inc_return_unchecked(v);
57558 +}
57559 +#endif
57560 +
57561 static inline long atomic_long_dec_return(atomic_long_t *l)
57562 {
57563 atomic_t *v = (atomic_t *)l;
57564 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57565
57566 #endif /* BITS_PER_LONG == 64 */
57567
57568 +#ifdef CONFIG_PAX_REFCOUNT
57569 +static inline void pax_refcount_needs_these_functions(void)
57570 +{
57571 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
57572 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57573 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57574 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57575 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57576 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57577 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57578 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57579 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57580 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57581 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57582 +
57583 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57584 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57585 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57586 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57587 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57588 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57589 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57590 +}
57591 +#else
57592 +#define atomic_read_unchecked(v) atomic_read(v)
57593 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57594 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57595 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57596 +#define atomic_inc_unchecked(v) atomic_inc(v)
57597 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57598 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57599 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57600 +#define atomic_dec_unchecked(v) atomic_dec(v)
57601 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57602 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57603 +
57604 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
57605 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57606 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57607 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57608 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57609 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57610 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57611 +#endif
57612 +
57613 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
57614 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
57615 index b18ce4f..2ee2843 100644
57616 --- a/include/asm-generic/atomic64.h
57617 +++ b/include/asm-generic/atomic64.h
57618 @@ -16,6 +16,8 @@ typedef struct {
57619 long long counter;
57620 } atomic64_t;
57621
57622 +typedef atomic64_t atomic64_unchecked_t;
57623 +
57624 #define ATOMIC64_INIT(i) { (i) }
57625
57626 extern long long atomic64_read(const atomic64_t *v);
57627 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
57628 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
57629 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
57630
57631 +#define atomic64_read_unchecked(v) atomic64_read(v)
57632 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
57633 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
57634 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
57635 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
57636 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
57637 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
57638 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
57639 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
57640 +
57641 #endif /* _ASM_GENERIC_ATOMIC64_H */
57642 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
57643 index 1bfcfe5..e04c5c9 100644
57644 --- a/include/asm-generic/cache.h
57645 +++ b/include/asm-generic/cache.h
57646 @@ -6,7 +6,7 @@
57647 * cache lines need to provide their own cache.h.
57648 */
57649
57650 -#define L1_CACHE_SHIFT 5
57651 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57652 +#define L1_CACHE_SHIFT 5UL
57653 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57654
57655 #endif /* __ASM_GENERIC_CACHE_H */
57656 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
57657 index 1ca3efc..e3dc852 100644
57658 --- a/include/asm-generic/int-l64.h
57659 +++ b/include/asm-generic/int-l64.h
57660 @@ -46,6 +46,8 @@ typedef unsigned int u32;
57661 typedef signed long s64;
57662 typedef unsigned long u64;
57663
57664 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57665 +
57666 #define S8_C(x) x
57667 #define U8_C(x) x ## U
57668 #define S16_C(x) x
57669 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
57670 index f394147..b6152b9 100644
57671 --- a/include/asm-generic/int-ll64.h
57672 +++ b/include/asm-generic/int-ll64.h
57673 @@ -51,6 +51,8 @@ typedef unsigned int u32;
57674 typedef signed long long s64;
57675 typedef unsigned long long u64;
57676
57677 +typedef unsigned long long intoverflow_t;
57678 +
57679 #define S8_C(x) x
57680 #define U8_C(x) x ## U
57681 #define S16_C(x) x
57682 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
57683 index 0232ccb..13d9165 100644
57684 --- a/include/asm-generic/kmap_types.h
57685 +++ b/include/asm-generic/kmap_types.h
57686 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57687 KMAP_D(17) KM_NMI,
57688 KMAP_D(18) KM_NMI_PTE,
57689 KMAP_D(19) KM_KDB,
57690 +KMAP_D(20) KM_CLEARPAGE,
57691 /*
57692 * Remember to update debug_kmap_atomic() when adding new kmap types!
57693 */
57694 -KMAP_D(20) KM_TYPE_NR
57695 +KMAP_D(21) KM_TYPE_NR
57696 };
57697
57698 #undef KMAP_D
57699 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
57700 index 725612b..9cc513a 100644
57701 --- a/include/asm-generic/pgtable-nopmd.h
57702 +++ b/include/asm-generic/pgtable-nopmd.h
57703 @@ -1,14 +1,19 @@
57704 #ifndef _PGTABLE_NOPMD_H
57705 #define _PGTABLE_NOPMD_H
57706
57707 -#ifndef __ASSEMBLY__
57708 -
57709 #include <asm-generic/pgtable-nopud.h>
57710
57711 -struct mm_struct;
57712 -
57713 #define __PAGETABLE_PMD_FOLDED
57714
57715 +#define PMD_SHIFT PUD_SHIFT
57716 +#define PTRS_PER_PMD 1
57717 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57718 +#define PMD_MASK (~(PMD_SIZE-1))
57719 +
57720 +#ifndef __ASSEMBLY__
57721 +
57722 +struct mm_struct;
57723 +
57724 /*
57725 * Having the pmd type consist of a pud gets the size right, and allows
57726 * us to conceptually access the pud entry that this pmd is folded into
57727 @@ -16,11 +21,6 @@ struct mm_struct;
57728 */
57729 typedef struct { pud_t pud; } pmd_t;
57730
57731 -#define PMD_SHIFT PUD_SHIFT
57732 -#define PTRS_PER_PMD 1
57733 -#define PMD_SIZE (1UL << PMD_SHIFT)
57734 -#define PMD_MASK (~(PMD_SIZE-1))
57735 -
57736 /*
57737 * The "pud_xxx()" functions here are trivial for a folded two-level
57738 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57739 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
57740 index 810431d..ccc3638 100644
57741 --- a/include/asm-generic/pgtable-nopud.h
57742 +++ b/include/asm-generic/pgtable-nopud.h
57743 @@ -1,10 +1,15 @@
57744 #ifndef _PGTABLE_NOPUD_H
57745 #define _PGTABLE_NOPUD_H
57746
57747 -#ifndef __ASSEMBLY__
57748 -
57749 #define __PAGETABLE_PUD_FOLDED
57750
57751 +#define PUD_SHIFT PGDIR_SHIFT
57752 +#define PTRS_PER_PUD 1
57753 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57754 +#define PUD_MASK (~(PUD_SIZE-1))
57755 +
57756 +#ifndef __ASSEMBLY__
57757 +
57758 /*
57759 * Having the pud type consist of a pgd gets the size right, and allows
57760 * us to conceptually access the pgd entry that this pud is folded into
57761 @@ -12,11 +17,6 @@
57762 */
57763 typedef struct { pgd_t pgd; } pud_t;
57764
57765 -#define PUD_SHIFT PGDIR_SHIFT
57766 -#define PTRS_PER_PUD 1
57767 -#define PUD_SIZE (1UL << PUD_SHIFT)
57768 -#define PUD_MASK (~(PUD_SIZE-1))
57769 -
57770 /*
57771 * The "pgd_xxx()" functions here are trivial for a folded two-level
57772 * setup: the pud is never bad, and a pud always exists (as it's folded
57773 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
57774 index 76bff2b..c7a14e2 100644
57775 --- a/include/asm-generic/pgtable.h
57776 +++ b/include/asm-generic/pgtable.h
57777 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57778 #endif /* __HAVE_ARCH_PMD_WRITE */
57779 #endif
57780
57781 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57782 +static inline unsigned long pax_open_kernel(void) { return 0; }
57783 +#endif
57784 +
57785 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57786 +static inline unsigned long pax_close_kernel(void) { return 0; }
57787 +#endif
57788 +
57789 #endif /* !__ASSEMBLY__ */
57790
57791 #endif /* _ASM_GENERIC_PGTABLE_H */
57792 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
57793 index b5e2e4c..6a5373e 100644
57794 --- a/include/asm-generic/vmlinux.lds.h
57795 +++ b/include/asm-generic/vmlinux.lds.h
57796 @@ -217,6 +217,7 @@
57797 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57798 VMLINUX_SYMBOL(__start_rodata) = .; \
57799 *(.rodata) *(.rodata.*) \
57800 + *(.data..read_only) \
57801 *(__vermagic) /* Kernel version magic */ \
57802 . = ALIGN(8); \
57803 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57804 @@ -722,17 +723,18 @@
57805 * section in the linker script will go there too. @phdr should have
57806 * a leading colon.
57807 *
57808 - * Note that this macros defines __per_cpu_load as an absolute symbol.
57809 + * Note that this macros defines per_cpu_load as an absolute symbol.
57810 * If there is no need to put the percpu section at a predetermined
57811 * address, use PERCPU_SECTION.
57812 */
57813 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57814 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
57815 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57816 + per_cpu_load = .; \
57817 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57818 - LOAD_OFFSET) { \
57819 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57820 PERCPU_INPUT(cacheline) \
57821 } phdr \
57822 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57823 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57824
57825 /**
57826 * PERCPU_SECTION - define output section for percpu area, simple version
57827 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
57828 index bf4b2dc..2d0762f 100644
57829 --- a/include/drm/drmP.h
57830 +++ b/include/drm/drmP.h
57831 @@ -72,6 +72,7 @@
57832 #include <linux/workqueue.h>
57833 #include <linux/poll.h>
57834 #include <asm/pgalloc.h>
57835 +#include <asm/local.h>
57836 #include "drm.h"
57837
57838 #include <linux/idr.h>
57839 @@ -1038,7 +1039,7 @@ struct drm_device {
57840
57841 /** \name Usage Counters */
57842 /*@{ */
57843 - int open_count; /**< Outstanding files open */
57844 + local_t open_count; /**< Outstanding files open */
57845 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57846 atomic_t vma_count; /**< Outstanding vma areas open */
57847 int buf_use; /**< Buffers in use -- cannot alloc */
57848 @@ -1049,7 +1050,7 @@ struct drm_device {
57849 /*@{ */
57850 unsigned long counters;
57851 enum drm_stat_type types[15];
57852 - atomic_t counts[15];
57853 + atomic_unchecked_t counts[15];
57854 /*@} */
57855
57856 struct list_head filelist;
57857 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
57858 index 73b0712..0b7ef2f 100644
57859 --- a/include/drm/drm_crtc_helper.h
57860 +++ b/include/drm/drm_crtc_helper.h
57861 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57862
57863 /* disable crtc when not in use - more explicit than dpms off */
57864 void (*disable)(struct drm_crtc *crtc);
57865 -};
57866 +} __no_const;
57867
57868 struct drm_encoder_helper_funcs {
57869 void (*dpms)(struct drm_encoder *encoder, int mode);
57870 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57871 struct drm_connector *connector);
57872 /* disable encoder when not in use - more explicit than dpms off */
57873 void (*disable)(struct drm_encoder *encoder);
57874 -};
57875 +} __no_const;
57876
57877 struct drm_connector_helper_funcs {
57878 int (*get_modes)(struct drm_connector *connector);
57879 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
57880 index 26c1f78..6722682 100644
57881 --- a/include/drm/ttm/ttm_memory.h
57882 +++ b/include/drm/ttm/ttm_memory.h
57883 @@ -47,7 +47,7 @@
57884
57885 struct ttm_mem_shrink {
57886 int (*do_shrink) (struct ttm_mem_shrink *);
57887 -};
57888 +} __no_const;
57889
57890 /**
57891 * struct ttm_mem_global - Global memory accounting structure.
57892 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
57893 index e86dfca..40cc55f 100644
57894 --- a/include/linux/a.out.h
57895 +++ b/include/linux/a.out.h
57896 @@ -39,6 +39,14 @@ enum machine_type {
57897 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57898 };
57899
57900 +/* Constants for the N_FLAGS field */
57901 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57902 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57903 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57904 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57905 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57906 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57907 +
57908 #if !defined (N_MAGIC)
57909 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57910 #endif
57911 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
57912 index 49a83ca..df96b54 100644
57913 --- a/include/linux/atmdev.h
57914 +++ b/include/linux/atmdev.h
57915 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57916 #endif
57917
57918 struct k_atm_aal_stats {
57919 -#define __HANDLE_ITEM(i) atomic_t i
57920 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57921 __AAL_STAT_ITEMS
57922 #undef __HANDLE_ITEM
57923 };
57924 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
57925 index fd88a39..8a801b4 100644
57926 --- a/include/linux/binfmts.h
57927 +++ b/include/linux/binfmts.h
57928 @@ -18,7 +18,7 @@ struct pt_regs;
57929 #define BINPRM_BUF_SIZE 128
57930
57931 #ifdef __KERNEL__
57932 -#include <linux/list.h>
57933 +#include <linux/sched.h>
57934
57935 #define CORENAME_MAX_SIZE 128
57936
57937 @@ -58,6 +58,7 @@ struct linux_binprm {
57938 unsigned interp_flags;
57939 unsigned interp_data;
57940 unsigned long loader, exec;
57941 + char tcomm[TASK_COMM_LEN];
57942 };
57943
57944 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
57945 @@ -88,6 +89,7 @@ struct linux_binfmt {
57946 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57947 int (*load_shlib)(struct file *);
57948 int (*core_dump)(struct coredump_params *cprm);
57949 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57950 unsigned long min_coredump; /* minimal dump size */
57951 };
57952
57953 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
57954 index 0ed1eb0..3ab569b 100644
57955 --- a/include/linux/blkdev.h
57956 +++ b/include/linux/blkdev.h
57957 @@ -1315,7 +1315,7 @@ struct block_device_operations {
57958 /* this callback is with swap_lock and sometimes page table lock held */
57959 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57960 struct module *owner;
57961 -};
57962 +} __do_const;
57963
57964 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57965 unsigned long);
57966 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
57967 index 4d1a074..88f929a 100644
57968 --- a/include/linux/blktrace_api.h
57969 +++ b/include/linux/blktrace_api.h
57970 @@ -162,7 +162,7 @@ struct blk_trace {
57971 struct dentry *dir;
57972 struct dentry *dropped_file;
57973 struct dentry *msg_file;
57974 - atomic_t dropped;
57975 + atomic_unchecked_t dropped;
57976 };
57977
57978 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57979 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
57980 index 83195fb..0b0f77d 100644
57981 --- a/include/linux/byteorder/little_endian.h
57982 +++ b/include/linux/byteorder/little_endian.h
57983 @@ -42,51 +42,51 @@
57984
57985 static inline __le64 __cpu_to_le64p(const __u64 *p)
57986 {
57987 - return (__force __le64)*p;
57988 + return (__force const __le64)*p;
57989 }
57990 static inline __u64 __le64_to_cpup(const __le64 *p)
57991 {
57992 - return (__force __u64)*p;
57993 + return (__force const __u64)*p;
57994 }
57995 static inline __le32 __cpu_to_le32p(const __u32 *p)
57996 {
57997 - return (__force __le32)*p;
57998 + return (__force const __le32)*p;
57999 }
58000 static inline __u32 __le32_to_cpup(const __le32 *p)
58001 {
58002 - return (__force __u32)*p;
58003 + return (__force const __u32)*p;
58004 }
58005 static inline __le16 __cpu_to_le16p(const __u16 *p)
58006 {
58007 - return (__force __le16)*p;
58008 + return (__force const __le16)*p;
58009 }
58010 static inline __u16 __le16_to_cpup(const __le16 *p)
58011 {
58012 - return (__force __u16)*p;
58013 + return (__force const __u16)*p;
58014 }
58015 static inline __be64 __cpu_to_be64p(const __u64 *p)
58016 {
58017 - return (__force __be64)__swab64p(p);
58018 + return (__force const __be64)__swab64p(p);
58019 }
58020 static inline __u64 __be64_to_cpup(const __be64 *p)
58021 {
58022 - return __swab64p((__u64 *)p);
58023 + return __swab64p((const __u64 *)p);
58024 }
58025 static inline __be32 __cpu_to_be32p(const __u32 *p)
58026 {
58027 - return (__force __be32)__swab32p(p);
58028 + return (__force const __be32)__swab32p(p);
58029 }
58030 static inline __u32 __be32_to_cpup(const __be32 *p)
58031 {
58032 - return __swab32p((__u32 *)p);
58033 + return __swab32p((const __u32 *)p);
58034 }
58035 static inline __be16 __cpu_to_be16p(const __u16 *p)
58036 {
58037 - return (__force __be16)__swab16p(p);
58038 + return (__force const __be16)__swab16p(p);
58039 }
58040 static inline __u16 __be16_to_cpup(const __be16 *p)
58041 {
58042 - return __swab16p((__u16 *)p);
58043 + return __swab16p((const __u16 *)p);
58044 }
58045 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
58046 #define __le64_to_cpus(x) do { (void)(x); } while (0)
58047 diff --git a/include/linux/cache.h b/include/linux/cache.h
58048 index 4c57065..4307975 100644
58049 --- a/include/linux/cache.h
58050 +++ b/include/linux/cache.h
58051 @@ -16,6 +16,10 @@
58052 #define __read_mostly
58053 #endif
58054
58055 +#ifndef __read_only
58056 +#define __read_only __read_mostly
58057 +#endif
58058 +
58059 #ifndef ____cacheline_aligned
58060 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
58061 #endif
58062 diff --git a/include/linux/capability.h b/include/linux/capability.h
58063 index a63d13d..069bfd5 100644
58064 --- a/include/linux/capability.h
58065 +++ b/include/linux/capability.h
58066 @@ -548,6 +548,9 @@ extern bool capable(int cap);
58067 extern bool ns_capable(struct user_namespace *ns, int cap);
58068 extern bool task_ns_capable(struct task_struct *t, int cap);
58069 extern bool nsown_capable(int cap);
58070 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
58071 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
58072 +extern bool capable_nolog(int cap);
58073
58074 /* audit system wants to get cap info from files as well */
58075 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
58076 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
58077 index 04ffb2e..6799180 100644
58078 --- a/include/linux/cleancache.h
58079 +++ b/include/linux/cleancache.h
58080 @@ -31,7 +31,7 @@ struct cleancache_ops {
58081 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
58082 void (*flush_inode)(int, struct cleancache_filekey);
58083 void (*flush_fs)(int);
58084 -};
58085 +} __no_const;
58086
58087 extern struct cleancache_ops
58088 cleancache_register_ops(struct cleancache_ops *ops);
58089 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
58090 index dfadc96..c0e70c1 100644
58091 --- a/include/linux/compiler-gcc4.h
58092 +++ b/include/linux/compiler-gcc4.h
58093 @@ -31,6 +31,12 @@
58094
58095
58096 #if __GNUC_MINOR__ >= 5
58097 +
58098 +#ifdef CONSTIFY_PLUGIN
58099 +#define __no_const __attribute__((no_const))
58100 +#define __do_const __attribute__((do_const))
58101 +#endif
58102 +
58103 /*
58104 * Mark a position in code as unreachable. This can be used to
58105 * suppress control flow warnings after asm blocks that transfer
58106 @@ -46,6 +52,11 @@
58107 #define __noclone __attribute__((__noclone__))
58108
58109 #endif
58110 +
58111 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
58112 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
58113 +#define __bos0(ptr) __bos((ptr), 0)
58114 +#define __bos1(ptr) __bos((ptr), 1)
58115 #endif
58116
58117 #if __GNUC_MINOR__ > 0
58118 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
58119 index 320d6c9..8573a1c 100644
58120 --- a/include/linux/compiler.h
58121 +++ b/include/linux/compiler.h
58122 @@ -5,31 +5,62 @@
58123
58124 #ifdef __CHECKER__
58125 # define __user __attribute__((noderef, address_space(1)))
58126 +# define __force_user __force __user
58127 # define __kernel __attribute__((address_space(0)))
58128 +# define __force_kernel __force __kernel
58129 # define __safe __attribute__((safe))
58130 # define __force __attribute__((force))
58131 # define __nocast __attribute__((nocast))
58132 # define __iomem __attribute__((noderef, address_space(2)))
58133 +# define __force_iomem __force __iomem
58134 # define __acquires(x) __attribute__((context(x,0,1)))
58135 # define __releases(x) __attribute__((context(x,1,0)))
58136 # define __acquire(x) __context__(x,1)
58137 # define __release(x) __context__(x,-1)
58138 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
58139 # define __percpu __attribute__((noderef, address_space(3)))
58140 +# define __force_percpu __force __percpu
58141 #ifdef CONFIG_SPARSE_RCU_POINTER
58142 # define __rcu __attribute__((noderef, address_space(4)))
58143 +# define __force_rcu __force __rcu
58144 #else
58145 # define __rcu
58146 +# define __force_rcu
58147 #endif
58148 extern void __chk_user_ptr(const volatile void __user *);
58149 extern void __chk_io_ptr(const volatile void __iomem *);
58150 +#elif defined(CHECKER_PLUGIN)
58151 +//# define __user
58152 +//# define __force_user
58153 +//# define __kernel
58154 +//# define __force_kernel
58155 +# define __safe
58156 +# define __force
58157 +# define __nocast
58158 +# define __iomem
58159 +# define __force_iomem
58160 +# define __chk_user_ptr(x) (void)0
58161 +# define __chk_io_ptr(x) (void)0
58162 +# define __builtin_warning(x, y...) (1)
58163 +# define __acquires(x)
58164 +# define __releases(x)
58165 +# define __acquire(x) (void)0
58166 +# define __release(x) (void)0
58167 +# define __cond_lock(x,c) (c)
58168 +# define __percpu
58169 +# define __force_percpu
58170 +# define __rcu
58171 +# define __force_rcu
58172 #else
58173 # define __user
58174 +# define __force_user
58175 # define __kernel
58176 +# define __force_kernel
58177 # define __safe
58178 # define __force
58179 # define __nocast
58180 # define __iomem
58181 +# define __force_iomem
58182 # define __chk_user_ptr(x) (void)0
58183 # define __chk_io_ptr(x) (void)0
58184 # define __builtin_warning(x, y...) (1)
58185 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
58186 # define __release(x) (void)0
58187 # define __cond_lock(x,c) (c)
58188 # define __percpu
58189 +# define __force_percpu
58190 # define __rcu
58191 +# define __force_rcu
58192 #endif
58193
58194 #ifdef __KERNEL__
58195 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58196 # define __attribute_const__ /* unimplemented */
58197 #endif
58198
58199 +#ifndef __no_const
58200 +# define __no_const
58201 +#endif
58202 +
58203 +#ifndef __do_const
58204 +# define __do_const
58205 +#endif
58206 +
58207 /*
58208 * Tell gcc if a function is cold. The compiler will assume any path
58209 * directly leading to the call is unlikely.
58210 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58211 #define __cold
58212 #endif
58213
58214 +#ifndef __alloc_size
58215 +#define __alloc_size(...)
58216 +#endif
58217 +
58218 +#ifndef __bos
58219 +#define __bos(ptr, arg)
58220 +#endif
58221 +
58222 +#ifndef __bos0
58223 +#define __bos0(ptr)
58224 +#endif
58225 +
58226 +#ifndef __bos1
58227 +#define __bos1(ptr)
58228 +#endif
58229 +
58230 /* Simple shorthand for a section definition */
58231 #ifndef __section
58232 # define __section(S) __attribute__ ((__section__(#S)))
58233 @@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58234 * use is to mediate communication between process-level code and irq/NMI
58235 * handlers, all running on the same CPU.
58236 */
58237 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
58238 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
58239 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
58240
58241 #endif /* __LINUX_COMPILER_H */
58242 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
58243 index e9eaec5..bfeb9bb 100644
58244 --- a/include/linux/cpuset.h
58245 +++ b/include/linux/cpuset.h
58246 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
58247 * nodemask.
58248 */
58249 smp_mb();
58250 - --ACCESS_ONCE(current->mems_allowed_change_disable);
58251 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
58252 }
58253
58254 static inline void set_mems_allowed(nodemask_t nodemask)
58255 diff --git a/include/linux/cred.h b/include/linux/cred.h
58256 index 4030896..8d6f342 100644
58257 --- a/include/linux/cred.h
58258 +++ b/include/linux/cred.h
58259 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
58260 static inline void validate_process_creds(void)
58261 {
58262 }
58263 +static inline void validate_task_creds(struct task_struct *task)
58264 +{
58265 +}
58266 #endif
58267
58268 /**
58269 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
58270 index 8a94217..15d49e3 100644
58271 --- a/include/linux/crypto.h
58272 +++ b/include/linux/crypto.h
58273 @@ -365,7 +365,7 @@ struct cipher_tfm {
58274 const u8 *key, unsigned int keylen);
58275 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58276 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58277 -};
58278 +} __no_const;
58279
58280 struct hash_tfm {
58281 int (*init)(struct hash_desc *desc);
58282 @@ -386,13 +386,13 @@ struct compress_tfm {
58283 int (*cot_decompress)(struct crypto_tfm *tfm,
58284 const u8 *src, unsigned int slen,
58285 u8 *dst, unsigned int *dlen);
58286 -};
58287 +} __no_const;
58288
58289 struct rng_tfm {
58290 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
58291 unsigned int dlen);
58292 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
58293 -};
58294 +} __no_const;
58295
58296 #define crt_ablkcipher crt_u.ablkcipher
58297 #define crt_aead crt_u.aead
58298 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
58299 index 7925bf0..d5143d2 100644
58300 --- a/include/linux/decompress/mm.h
58301 +++ b/include/linux/decompress/mm.h
58302 @@ -77,7 +77,7 @@ static void free(void *where)
58303 * warnings when not needed (indeed large_malloc / large_free are not
58304 * needed by inflate */
58305
58306 -#define malloc(a) kmalloc(a, GFP_KERNEL)
58307 +#define malloc(a) kmalloc((a), GFP_KERNEL)
58308 #define free(a) kfree(a)
58309
58310 #define large_malloc(a) vmalloc(a)
58311 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
58312 index e13117c..e9fc938 100644
58313 --- a/include/linux/dma-mapping.h
58314 +++ b/include/linux/dma-mapping.h
58315 @@ -46,7 +46,7 @@ struct dma_map_ops {
58316 u64 (*get_required_mask)(struct device *dev);
58317 #endif
58318 int is_phys;
58319 -};
58320 +} __do_const;
58321
58322 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
58323
58324 diff --git a/include/linux/efi.h b/include/linux/efi.h
58325 index 2362a0b..cfaf8fcc 100644
58326 --- a/include/linux/efi.h
58327 +++ b/include/linux/efi.h
58328 @@ -446,7 +446,7 @@ struct efivar_operations {
58329 efi_get_variable_t *get_variable;
58330 efi_get_next_variable_t *get_next_variable;
58331 efi_set_variable_t *set_variable;
58332 -};
58333 +} __no_const;
58334
58335 struct efivars {
58336 /*
58337 diff --git a/include/linux/elf.h b/include/linux/elf.h
58338 index 31f0508..5421c01 100644
58339 --- a/include/linux/elf.h
58340 +++ b/include/linux/elf.h
58341 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
58342 #define PT_GNU_EH_FRAME 0x6474e550
58343
58344 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
58345 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
58346 +
58347 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
58348 +
58349 +/* Constants for the e_flags field */
58350 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
58351 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
58352 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
58353 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
58354 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
58355 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
58356
58357 /*
58358 * Extended Numbering
58359 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
58360 #define DT_DEBUG 21
58361 #define DT_TEXTREL 22
58362 #define DT_JMPREL 23
58363 +#define DT_FLAGS 30
58364 + #define DF_TEXTREL 0x00000004
58365 #define DT_ENCODING 32
58366 #define OLD_DT_LOOS 0x60000000
58367 #define DT_LOOS 0x6000000d
58368 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
58369 #define PF_W 0x2
58370 #define PF_X 0x1
58371
58372 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
58373 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
58374 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
58375 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
58376 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
58377 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
58378 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58379 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58380 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58381 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58382 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58383 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58384 +
58385 typedef struct elf32_phdr{
58386 Elf32_Word p_type;
58387 Elf32_Off p_offset;
58388 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
58389 #define EI_OSABI 7
58390 #define EI_PAD 8
58391
58392 +#define EI_PAX 14
58393 +
58394 #define ELFMAG0 0x7f /* EI_MAG */
58395 #define ELFMAG1 'E'
58396 #define ELFMAG2 'L'
58397 @@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
58398 #define elf_note elf32_note
58399 #define elf_addr_t Elf32_Off
58400 #define Elf_Half Elf32_Half
58401 +#define elf_dyn Elf32_Dyn
58402
58403 #else
58404
58405 @@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
58406 #define elf_note elf64_note
58407 #define elf_addr_t Elf64_Off
58408 #define Elf_Half Elf64_Half
58409 +#define elf_dyn Elf64_Dyn
58410
58411 #endif
58412
58413 diff --git a/include/linux/filter.h b/include/linux/filter.h
58414 index 8eeb205..d59bfa2 100644
58415 --- a/include/linux/filter.h
58416 +++ b/include/linux/filter.h
58417 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
58418
58419 struct sk_buff;
58420 struct sock;
58421 +struct bpf_jit_work;
58422
58423 struct sk_filter
58424 {
58425 @@ -141,6 +142,9 @@ struct sk_filter
58426 unsigned int len; /* Number of filter blocks */
58427 unsigned int (*bpf_func)(const struct sk_buff *skb,
58428 const struct sock_filter *filter);
58429 +#ifdef CONFIG_BPF_JIT
58430 + struct bpf_jit_work *work;
58431 +#endif
58432 struct rcu_head rcu;
58433 struct sock_filter insns[0];
58434 };
58435 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
58436 index 84ccf8e..2e9b14c 100644
58437 --- a/include/linux/firewire.h
58438 +++ b/include/linux/firewire.h
58439 @@ -428,7 +428,7 @@ struct fw_iso_context {
58440 union {
58441 fw_iso_callback_t sc;
58442 fw_iso_mc_callback_t mc;
58443 - } callback;
58444 + } __no_const callback;
58445 void *callback_data;
58446 };
58447
58448 diff --git a/include/linux/fs.h b/include/linux/fs.h
58449 index 10b2288..09180e4 100644
58450 --- a/include/linux/fs.h
58451 +++ b/include/linux/fs.h
58452 @@ -1609,7 +1609,8 @@ struct file_operations {
58453 int (*setlease)(struct file *, long, struct file_lock **);
58454 long (*fallocate)(struct file *file, int mode, loff_t offset,
58455 loff_t len);
58456 -};
58457 +} __do_const;
58458 +typedef struct file_operations __no_const file_operations_no_const;
58459
58460 struct inode_operations {
58461 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
58462 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
58463 index 003dc0f..3c4ea97 100644
58464 --- a/include/linux/fs_struct.h
58465 +++ b/include/linux/fs_struct.h
58466 @@ -6,7 +6,7 @@
58467 #include <linux/seqlock.h>
58468
58469 struct fs_struct {
58470 - int users;
58471 + atomic_t users;
58472 spinlock_t lock;
58473 seqcount_t seq;
58474 int umask;
58475 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
58476 index ce31408..b1ad003 100644
58477 --- a/include/linux/fscache-cache.h
58478 +++ b/include/linux/fscache-cache.h
58479 @@ -102,7 +102,7 @@ struct fscache_operation {
58480 fscache_operation_release_t release;
58481 };
58482
58483 -extern atomic_t fscache_op_debug_id;
58484 +extern atomic_unchecked_t fscache_op_debug_id;
58485 extern void fscache_op_work_func(struct work_struct *work);
58486
58487 extern void fscache_enqueue_operation(struct fscache_operation *);
58488 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
58489 {
58490 INIT_WORK(&op->work, fscache_op_work_func);
58491 atomic_set(&op->usage, 1);
58492 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58493 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58494 op->processor = processor;
58495 op->release = release;
58496 INIT_LIST_HEAD(&op->pend_link);
58497 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
58498 index 2a53f10..0187fdf 100644
58499 --- a/include/linux/fsnotify.h
58500 +++ b/include/linux/fsnotify.h
58501 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
58502 */
58503 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58504 {
58505 - return kstrdup(name, GFP_KERNEL);
58506 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58507 }
58508
58509 /*
58510 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
58511 index 91d0e0a3..035666b 100644
58512 --- a/include/linux/fsnotify_backend.h
58513 +++ b/include/linux/fsnotify_backend.h
58514 @@ -105,6 +105,7 @@ struct fsnotify_ops {
58515 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
58516 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
58517 };
58518 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
58519
58520 /*
58521 * A group is a "thing" that wants to receive notification about filesystem
58522 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
58523 index c3da42d..c70e0df 100644
58524 --- a/include/linux/ftrace_event.h
58525 +++ b/include/linux/ftrace_event.h
58526 @@ -97,7 +97,7 @@ struct trace_event_functions {
58527 trace_print_func raw;
58528 trace_print_func hex;
58529 trace_print_func binary;
58530 -};
58531 +} __no_const;
58532
58533 struct trace_event {
58534 struct hlist_node node;
58535 @@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
58536 extern int trace_add_event_call(struct ftrace_event_call *call);
58537 extern void trace_remove_event_call(struct ftrace_event_call *call);
58538
58539 -#define is_signed_type(type) (((type)(-1)) < 0)
58540 +#define is_signed_type(type) (((type)(-1)) < (type)1)
58541
58542 int trace_set_clr_event(const char *system, const char *event, int set);
58543
58544 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
58545 index 6d18f35..ab71e2c 100644
58546 --- a/include/linux/genhd.h
58547 +++ b/include/linux/genhd.h
58548 @@ -185,7 +185,7 @@ struct gendisk {
58549 struct kobject *slave_dir;
58550
58551 struct timer_rand_state *random;
58552 - atomic_t sync_io; /* RAID */
58553 + atomic_unchecked_t sync_io; /* RAID */
58554 struct disk_events *ev;
58555 #ifdef CONFIG_BLK_DEV_INTEGRITY
58556 struct blk_integrity *integrity;
58557 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
58558 new file mode 100644
58559 index 0000000..8a130b6
58560 --- /dev/null
58561 +++ b/include/linux/gracl.h
58562 @@ -0,0 +1,319 @@
58563 +#ifndef GR_ACL_H
58564 +#define GR_ACL_H
58565 +
58566 +#include <linux/grdefs.h>
58567 +#include <linux/resource.h>
58568 +#include <linux/capability.h>
58569 +#include <linux/dcache.h>
58570 +#include <asm/resource.h>
58571 +
58572 +/* Major status information */
58573 +
58574 +#define GR_VERSION "grsecurity 2.9"
58575 +#define GRSECURITY_VERSION 0x2900
58576 +
58577 +enum {
58578 + GR_SHUTDOWN = 0,
58579 + GR_ENABLE = 1,
58580 + GR_SPROLE = 2,
58581 + GR_RELOAD = 3,
58582 + GR_SEGVMOD = 4,
58583 + GR_STATUS = 5,
58584 + GR_UNSPROLE = 6,
58585 + GR_PASSSET = 7,
58586 + GR_SPROLEPAM = 8,
58587 +};
58588 +
58589 +/* Password setup definitions
58590 + * kernel/grhash.c */
58591 +enum {
58592 + GR_PW_LEN = 128,
58593 + GR_SALT_LEN = 16,
58594 + GR_SHA_LEN = 32,
58595 +};
58596 +
58597 +enum {
58598 + GR_SPROLE_LEN = 64,
58599 +};
58600 +
58601 +enum {
58602 + GR_NO_GLOB = 0,
58603 + GR_REG_GLOB,
58604 + GR_CREATE_GLOB
58605 +};
58606 +
58607 +#define GR_NLIMITS 32
58608 +
58609 +/* Begin Data Structures */
58610 +
58611 +struct sprole_pw {
58612 + unsigned char *rolename;
58613 + unsigned char salt[GR_SALT_LEN];
58614 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58615 +};
58616 +
58617 +struct name_entry {
58618 + __u32 key;
58619 + ino_t inode;
58620 + dev_t device;
58621 + char *name;
58622 + __u16 len;
58623 + __u8 deleted;
58624 + struct name_entry *prev;
58625 + struct name_entry *next;
58626 +};
58627 +
58628 +struct inodev_entry {
58629 + struct name_entry *nentry;
58630 + struct inodev_entry *prev;
58631 + struct inodev_entry *next;
58632 +};
58633 +
58634 +struct acl_role_db {
58635 + struct acl_role_label **r_hash;
58636 + __u32 r_size;
58637 +};
58638 +
58639 +struct inodev_db {
58640 + struct inodev_entry **i_hash;
58641 + __u32 i_size;
58642 +};
58643 +
58644 +struct name_db {
58645 + struct name_entry **n_hash;
58646 + __u32 n_size;
58647 +};
58648 +
58649 +struct crash_uid {
58650 + uid_t uid;
58651 + unsigned long expires;
58652 +};
58653 +
58654 +struct gr_hash_struct {
58655 + void **table;
58656 + void **nametable;
58657 + void *first;
58658 + __u32 table_size;
58659 + __u32 used_size;
58660 + int type;
58661 +};
58662 +
58663 +/* Userspace Grsecurity ACL data structures */
58664 +
58665 +struct acl_subject_label {
58666 + char *filename;
58667 + ino_t inode;
58668 + dev_t device;
58669 + __u32 mode;
58670 + kernel_cap_t cap_mask;
58671 + kernel_cap_t cap_lower;
58672 + kernel_cap_t cap_invert_audit;
58673 +
58674 + struct rlimit res[GR_NLIMITS];
58675 + __u32 resmask;
58676 +
58677 + __u8 user_trans_type;
58678 + __u8 group_trans_type;
58679 + uid_t *user_transitions;
58680 + gid_t *group_transitions;
58681 + __u16 user_trans_num;
58682 + __u16 group_trans_num;
58683 +
58684 + __u32 sock_families[2];
58685 + __u32 ip_proto[8];
58686 + __u32 ip_type;
58687 + struct acl_ip_label **ips;
58688 + __u32 ip_num;
58689 + __u32 inaddr_any_override;
58690 +
58691 + __u32 crashes;
58692 + unsigned long expires;
58693 +
58694 + struct acl_subject_label *parent_subject;
58695 + struct gr_hash_struct *hash;
58696 + struct acl_subject_label *prev;
58697 + struct acl_subject_label *next;
58698 +
58699 + struct acl_object_label **obj_hash;
58700 + __u32 obj_hash_size;
58701 + __u16 pax_flags;
58702 +};
58703 +
58704 +struct role_allowed_ip {
58705 + __u32 addr;
58706 + __u32 netmask;
58707 +
58708 + struct role_allowed_ip *prev;
58709 + struct role_allowed_ip *next;
58710 +};
58711 +
58712 +struct role_transition {
58713 + char *rolename;
58714 +
58715 + struct role_transition *prev;
58716 + struct role_transition *next;
58717 +};
58718 +
58719 +struct acl_role_label {
58720 + char *rolename;
58721 + uid_t uidgid;
58722 + __u16 roletype;
58723 +
58724 + __u16 auth_attempts;
58725 + unsigned long expires;
58726 +
58727 + struct acl_subject_label *root_label;
58728 + struct gr_hash_struct *hash;
58729 +
58730 + struct acl_role_label *prev;
58731 + struct acl_role_label *next;
58732 +
58733 + struct role_transition *transitions;
58734 + struct role_allowed_ip *allowed_ips;
58735 + uid_t *domain_children;
58736 + __u16 domain_child_num;
58737 +
58738 + umode_t umask;
58739 +
58740 + struct acl_subject_label **subj_hash;
58741 + __u32 subj_hash_size;
58742 +};
58743 +
58744 +struct user_acl_role_db {
58745 + struct acl_role_label **r_table;
58746 + __u32 num_pointers; /* Number of allocations to track */
58747 + __u32 num_roles; /* Number of roles */
58748 + __u32 num_domain_children; /* Number of domain children */
58749 + __u32 num_subjects; /* Number of subjects */
58750 + __u32 num_objects; /* Number of objects */
58751 +};
58752 +
58753 +struct acl_object_label {
58754 + char *filename;
58755 + ino_t inode;
58756 + dev_t device;
58757 + __u32 mode;
58758 +
58759 + struct acl_subject_label *nested;
58760 + struct acl_object_label *globbed;
58761 +
58762 + /* next two structures not used */
58763 +
58764 + struct acl_object_label *prev;
58765 + struct acl_object_label *next;
58766 +};
58767 +
58768 +struct acl_ip_label {
58769 + char *iface;
58770 + __u32 addr;
58771 + __u32 netmask;
58772 + __u16 low, high;
58773 + __u8 mode;
58774 + __u32 type;
58775 + __u32 proto[8];
58776 +
58777 + /* next two structures not used */
58778 +
58779 + struct acl_ip_label *prev;
58780 + struct acl_ip_label *next;
58781 +};
58782 +
58783 +struct gr_arg {
58784 + struct user_acl_role_db role_db;
58785 + unsigned char pw[GR_PW_LEN];
58786 + unsigned char salt[GR_SALT_LEN];
58787 + unsigned char sum[GR_SHA_LEN];
58788 + unsigned char sp_role[GR_SPROLE_LEN];
58789 + struct sprole_pw *sprole_pws;
58790 + dev_t segv_device;
58791 + ino_t segv_inode;
58792 + uid_t segv_uid;
58793 + __u16 num_sprole_pws;
58794 + __u16 mode;
58795 +};
58796 +
58797 +struct gr_arg_wrapper {
58798 + struct gr_arg *arg;
58799 + __u32 version;
58800 + __u32 size;
58801 +};
58802 +
58803 +struct subject_map {
58804 + struct acl_subject_label *user;
58805 + struct acl_subject_label *kernel;
58806 + struct subject_map *prev;
58807 + struct subject_map *next;
58808 +};
58809 +
58810 +struct acl_subj_map_db {
58811 + struct subject_map **s_hash;
58812 + __u32 s_size;
58813 +};
58814 +
58815 +/* End Data Structures Section */
58816 +
58817 +/* Hash functions generated by empirical testing by Brad Spengler
58818 + Makes good use of the low bits of the inode. Generally 0-1 times
58819 + in loop for successful match. 0-3 for unsuccessful match.
58820 + Shift/add algorithm with modulus of table size and an XOR*/
58821 +
58822 +static __inline__ unsigned int
58823 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58824 +{
58825 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
58826 +}
58827 +
58828 + static __inline__ unsigned int
58829 +shash(const struct acl_subject_label *userp, const unsigned int sz)
58830 +{
58831 + return ((const unsigned long)userp % sz);
58832 +}
58833 +
58834 +static __inline__ unsigned int
58835 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58836 +{
58837 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58838 +}
58839 +
58840 +static __inline__ unsigned int
58841 +nhash(const char *name, const __u16 len, const unsigned int sz)
58842 +{
58843 + return full_name_hash((const unsigned char *)name, len) % sz;
58844 +}
58845 +
58846 +#define FOR_EACH_ROLE_START(role) \
58847 + role = role_list; \
58848 + while (role) {
58849 +
58850 +#define FOR_EACH_ROLE_END(role) \
58851 + role = role->prev; \
58852 + }
58853 +
58854 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58855 + subj = NULL; \
58856 + iter = 0; \
58857 + while (iter < role->subj_hash_size) { \
58858 + if (subj == NULL) \
58859 + subj = role->subj_hash[iter]; \
58860 + if (subj == NULL) { \
58861 + iter++; \
58862 + continue; \
58863 + }
58864 +
58865 +#define FOR_EACH_SUBJECT_END(subj,iter) \
58866 + subj = subj->next; \
58867 + if (subj == NULL) \
58868 + iter++; \
58869 + }
58870 +
58871 +
58872 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58873 + subj = role->hash->first; \
58874 + while (subj != NULL) {
58875 +
58876 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58877 + subj = subj->next; \
58878 + }
58879 +
58880 +#endif
58881 +
58882 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
58883 new file mode 100644
58884 index 0000000..323ecf2
58885 --- /dev/null
58886 +++ b/include/linux/gralloc.h
58887 @@ -0,0 +1,9 @@
58888 +#ifndef __GRALLOC_H
58889 +#define __GRALLOC_H
58890 +
58891 +void acl_free_all(void);
58892 +int acl_alloc_stack_init(unsigned long size);
58893 +void *acl_alloc(unsigned long len);
58894 +void *acl_alloc_num(unsigned long num, unsigned long len);
58895 +
58896 +#endif
58897 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
58898 new file mode 100644
58899 index 0000000..b30e9bc
58900 --- /dev/null
58901 +++ b/include/linux/grdefs.h
58902 @@ -0,0 +1,140 @@
58903 +#ifndef GRDEFS_H
58904 +#define GRDEFS_H
58905 +
58906 +/* Begin grsecurity status declarations */
58907 +
58908 +enum {
58909 + GR_READY = 0x01,
58910 + GR_STATUS_INIT = 0x00 // disabled state
58911 +};
58912 +
58913 +/* Begin ACL declarations */
58914 +
58915 +/* Role flags */
58916 +
58917 +enum {
58918 + GR_ROLE_USER = 0x0001,
58919 + GR_ROLE_GROUP = 0x0002,
58920 + GR_ROLE_DEFAULT = 0x0004,
58921 + GR_ROLE_SPECIAL = 0x0008,
58922 + GR_ROLE_AUTH = 0x0010,
58923 + GR_ROLE_NOPW = 0x0020,
58924 + GR_ROLE_GOD = 0x0040,
58925 + GR_ROLE_LEARN = 0x0080,
58926 + GR_ROLE_TPE = 0x0100,
58927 + GR_ROLE_DOMAIN = 0x0200,
58928 + GR_ROLE_PAM = 0x0400,
58929 + GR_ROLE_PERSIST = 0x0800
58930 +};
58931 +
58932 +/* ACL Subject and Object mode flags */
58933 +enum {
58934 + GR_DELETED = 0x80000000
58935 +};
58936 +
58937 +/* ACL Object-only mode flags */
58938 +enum {
58939 + GR_READ = 0x00000001,
58940 + GR_APPEND = 0x00000002,
58941 + GR_WRITE = 0x00000004,
58942 + GR_EXEC = 0x00000008,
58943 + GR_FIND = 0x00000010,
58944 + GR_INHERIT = 0x00000020,
58945 + GR_SETID = 0x00000040,
58946 + GR_CREATE = 0x00000080,
58947 + GR_DELETE = 0x00000100,
58948 + GR_LINK = 0x00000200,
58949 + GR_AUDIT_READ = 0x00000400,
58950 + GR_AUDIT_APPEND = 0x00000800,
58951 + GR_AUDIT_WRITE = 0x00001000,
58952 + GR_AUDIT_EXEC = 0x00002000,
58953 + GR_AUDIT_FIND = 0x00004000,
58954 + GR_AUDIT_INHERIT= 0x00008000,
58955 + GR_AUDIT_SETID = 0x00010000,
58956 + GR_AUDIT_CREATE = 0x00020000,
58957 + GR_AUDIT_DELETE = 0x00040000,
58958 + GR_AUDIT_LINK = 0x00080000,
58959 + GR_PTRACERD = 0x00100000,
58960 + GR_NOPTRACE = 0x00200000,
58961 + GR_SUPPRESS = 0x00400000,
58962 + GR_NOLEARN = 0x00800000,
58963 + GR_INIT_TRANSFER= 0x01000000
58964 +};
58965 +
58966 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58967 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58968 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58969 +
58970 +/* ACL subject-only mode flags */
58971 +enum {
58972 + GR_KILL = 0x00000001,
58973 + GR_VIEW = 0x00000002,
58974 + GR_PROTECTED = 0x00000004,
58975 + GR_LEARN = 0x00000008,
58976 + GR_OVERRIDE = 0x00000010,
58977 + /* just a placeholder, this mode is only used in userspace */
58978 + GR_DUMMY = 0x00000020,
58979 + GR_PROTSHM = 0x00000040,
58980 + GR_KILLPROC = 0x00000080,
58981 + GR_KILLIPPROC = 0x00000100,
58982 + /* just a placeholder, this mode is only used in userspace */
58983 + GR_NOTROJAN = 0x00000200,
58984 + GR_PROTPROCFD = 0x00000400,
58985 + GR_PROCACCT = 0x00000800,
58986 + GR_RELAXPTRACE = 0x00001000,
58987 + GR_NESTED = 0x00002000,
58988 + GR_INHERITLEARN = 0x00004000,
58989 + GR_PROCFIND = 0x00008000,
58990 + GR_POVERRIDE = 0x00010000,
58991 + GR_KERNELAUTH = 0x00020000,
58992 + GR_ATSECURE = 0x00040000,
58993 + GR_SHMEXEC = 0x00080000
58994 +};
58995 +
58996 +enum {
58997 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58998 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58999 + GR_PAX_ENABLE_MPROTECT = 0x0004,
59000 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
59001 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
59002 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
59003 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
59004 + GR_PAX_DISABLE_MPROTECT = 0x0400,
59005 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
59006 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
59007 +};
59008 +
59009 +enum {
59010 + GR_ID_USER = 0x01,
59011 + GR_ID_GROUP = 0x02,
59012 +};
59013 +
59014 +enum {
59015 + GR_ID_ALLOW = 0x01,
59016 + GR_ID_DENY = 0x02,
59017 +};
59018 +
59019 +#define GR_CRASH_RES 31
59020 +#define GR_UIDTABLE_MAX 500
59021 +
59022 +/* begin resource learning section */
59023 +enum {
59024 + GR_RLIM_CPU_BUMP = 60,
59025 + GR_RLIM_FSIZE_BUMP = 50000,
59026 + GR_RLIM_DATA_BUMP = 10000,
59027 + GR_RLIM_STACK_BUMP = 1000,
59028 + GR_RLIM_CORE_BUMP = 10000,
59029 + GR_RLIM_RSS_BUMP = 500000,
59030 + GR_RLIM_NPROC_BUMP = 1,
59031 + GR_RLIM_NOFILE_BUMP = 5,
59032 + GR_RLIM_MEMLOCK_BUMP = 50000,
59033 + GR_RLIM_AS_BUMP = 500000,
59034 + GR_RLIM_LOCKS_BUMP = 2,
59035 + GR_RLIM_SIGPENDING_BUMP = 5,
59036 + GR_RLIM_MSGQUEUE_BUMP = 10000,
59037 + GR_RLIM_NICE_BUMP = 1,
59038 + GR_RLIM_RTPRIO_BUMP = 1,
59039 + GR_RLIM_RTTIME_BUMP = 1000000
59040 +};
59041 +
59042 +#endif
59043 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
59044 new file mode 100644
59045 index 0000000..da390f1
59046 --- /dev/null
59047 +++ b/include/linux/grinternal.h
59048 @@ -0,0 +1,221 @@
59049 +#ifndef __GRINTERNAL_H
59050 +#define __GRINTERNAL_H
59051 +
59052 +#ifdef CONFIG_GRKERNSEC
59053 +
59054 +#include <linux/fs.h>
59055 +#include <linux/mnt_namespace.h>
59056 +#include <linux/nsproxy.h>
59057 +#include <linux/gracl.h>
59058 +#include <linux/grdefs.h>
59059 +#include <linux/grmsg.h>
59060 +
59061 +void gr_add_learn_entry(const char *fmt, ...)
59062 + __attribute__ ((format (printf, 1, 2)));
59063 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
59064 + const struct vfsmount *mnt);
59065 +__u32 gr_check_create(const struct dentry *new_dentry,
59066 + const struct dentry *parent,
59067 + const struct vfsmount *mnt, const __u32 mode);
59068 +int gr_check_protected_task(const struct task_struct *task);
59069 +__u32 to_gr_audit(const __u32 reqmode);
59070 +int gr_set_acls(const int type);
59071 +int gr_apply_subject_to_task(struct task_struct *task);
59072 +int gr_acl_is_enabled(void);
59073 +char gr_roletype_to_char(void);
59074 +
59075 +void gr_handle_alertkill(struct task_struct *task);
59076 +char *gr_to_filename(const struct dentry *dentry,
59077 + const struct vfsmount *mnt);
59078 +char *gr_to_filename1(const struct dentry *dentry,
59079 + const struct vfsmount *mnt);
59080 +char *gr_to_filename2(const struct dentry *dentry,
59081 + const struct vfsmount *mnt);
59082 +char *gr_to_filename3(const struct dentry *dentry,
59083 + const struct vfsmount *mnt);
59084 +
59085 +extern int grsec_enable_ptrace_readexec;
59086 +extern int grsec_enable_harden_ptrace;
59087 +extern int grsec_enable_link;
59088 +extern int grsec_enable_fifo;
59089 +extern int grsec_enable_execve;
59090 +extern int grsec_enable_shm;
59091 +extern int grsec_enable_execlog;
59092 +extern int grsec_enable_signal;
59093 +extern int grsec_enable_audit_ptrace;
59094 +extern int grsec_enable_forkfail;
59095 +extern int grsec_enable_time;
59096 +extern int grsec_enable_rofs;
59097 +extern int grsec_enable_chroot_shmat;
59098 +extern int grsec_enable_chroot_mount;
59099 +extern int grsec_enable_chroot_double;
59100 +extern int grsec_enable_chroot_pivot;
59101 +extern int grsec_enable_chroot_chdir;
59102 +extern int grsec_enable_chroot_chmod;
59103 +extern int grsec_enable_chroot_mknod;
59104 +extern int grsec_enable_chroot_fchdir;
59105 +extern int grsec_enable_chroot_nice;
59106 +extern int grsec_enable_chroot_execlog;
59107 +extern int grsec_enable_chroot_caps;
59108 +extern int grsec_enable_chroot_sysctl;
59109 +extern int grsec_enable_chroot_unix;
59110 +extern int grsec_enable_tpe;
59111 +extern int grsec_tpe_gid;
59112 +extern int grsec_enable_tpe_all;
59113 +extern int grsec_enable_tpe_invert;
59114 +extern int grsec_enable_socket_all;
59115 +extern int grsec_socket_all_gid;
59116 +extern int grsec_enable_socket_client;
59117 +extern int grsec_socket_client_gid;
59118 +extern int grsec_enable_socket_server;
59119 +extern int grsec_socket_server_gid;
59120 +extern int grsec_audit_gid;
59121 +extern int grsec_enable_group;
59122 +extern int grsec_enable_audit_textrel;
59123 +extern int grsec_enable_log_rwxmaps;
59124 +extern int grsec_enable_mount;
59125 +extern int grsec_enable_chdir;
59126 +extern int grsec_resource_logging;
59127 +extern int grsec_enable_blackhole;
59128 +extern int grsec_lastack_retries;
59129 +extern int grsec_enable_brute;
59130 +extern int grsec_lock;
59131 +
59132 +extern spinlock_t grsec_alert_lock;
59133 +extern unsigned long grsec_alert_wtime;
59134 +extern unsigned long grsec_alert_fyet;
59135 +
59136 +extern spinlock_t grsec_audit_lock;
59137 +
59138 +extern rwlock_t grsec_exec_file_lock;
59139 +
59140 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
59141 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
59142 + (tsk)->exec_file->f_vfsmnt) : "/")
59143 +
59144 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
59145 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
59146 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59147 +
59148 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
59149 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
59150 + (tsk)->exec_file->f_vfsmnt) : "/")
59151 +
59152 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
59153 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
59154 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59155 +
59156 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
59157 +
59158 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
59159 +
59160 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
59161 + (task)->pid, (cred)->uid, \
59162 + (cred)->euid, (cred)->gid, (cred)->egid, \
59163 + gr_parent_task_fullpath(task), \
59164 + (task)->real_parent->comm, (task)->real_parent->pid, \
59165 + (pcred)->uid, (pcred)->euid, \
59166 + (pcred)->gid, (pcred)->egid
59167 +
59168 +#define GR_CHROOT_CAPS {{ \
59169 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
59170 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
59171 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
59172 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
59173 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
59174 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
59175 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
59176 +
59177 +#define security_learn(normal_msg,args...) \
59178 +({ \
59179 + read_lock(&grsec_exec_file_lock); \
59180 + gr_add_learn_entry(normal_msg "\n", ## args); \
59181 + read_unlock(&grsec_exec_file_lock); \
59182 +})
59183 +
59184 +enum {
59185 + GR_DO_AUDIT,
59186 + GR_DONT_AUDIT,
59187 + /* used for non-audit messages that we shouldn't kill the task on */
59188 + GR_DONT_AUDIT_GOOD
59189 +};
59190 +
59191 +enum {
59192 + GR_TTYSNIFF,
59193 + GR_RBAC,
59194 + GR_RBAC_STR,
59195 + GR_STR_RBAC,
59196 + GR_RBAC_MODE2,
59197 + GR_RBAC_MODE3,
59198 + GR_FILENAME,
59199 + GR_SYSCTL_HIDDEN,
59200 + GR_NOARGS,
59201 + GR_ONE_INT,
59202 + GR_ONE_INT_TWO_STR,
59203 + GR_ONE_STR,
59204 + GR_STR_INT,
59205 + GR_TWO_STR_INT,
59206 + GR_TWO_INT,
59207 + GR_TWO_U64,
59208 + GR_THREE_INT,
59209 + GR_FIVE_INT_TWO_STR,
59210 + GR_TWO_STR,
59211 + GR_THREE_STR,
59212 + GR_FOUR_STR,
59213 + GR_STR_FILENAME,
59214 + GR_FILENAME_STR,
59215 + GR_FILENAME_TWO_INT,
59216 + GR_FILENAME_TWO_INT_STR,
59217 + GR_TEXTREL,
59218 + GR_PTRACE,
59219 + GR_RESOURCE,
59220 + GR_CAP,
59221 + GR_SIG,
59222 + GR_SIG2,
59223 + GR_CRASH1,
59224 + GR_CRASH2,
59225 + GR_PSACCT,
59226 + GR_RWXMAP
59227 +};
59228 +
59229 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
59230 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
59231 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
59232 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
59233 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
59234 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
59235 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
59236 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
59237 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
59238 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
59239 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
59240 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
59241 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
59242 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
59243 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
59244 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
59245 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
59246 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
59247 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
59248 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
59249 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
59250 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
59251 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
59252 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
59253 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
59254 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
59255 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
59256 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
59257 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
59258 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
59259 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
59260 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
59261 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
59262 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
59263 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
59264 +
59265 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
59266 +
59267 +#endif
59268 +
59269 +#endif
59270 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
59271 new file mode 100644
59272 index 0000000..ae576a1
59273 --- /dev/null
59274 +++ b/include/linux/grmsg.h
59275 @@ -0,0 +1,109 @@
59276 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
59277 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
59278 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
59279 +#define GR_STOPMOD_MSG "denied modification of module state by "
59280 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
59281 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
59282 +#define GR_IOPERM_MSG "denied use of ioperm() by "
59283 +#define GR_IOPL_MSG "denied use of iopl() by "
59284 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
59285 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
59286 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
59287 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
59288 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
59289 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
59290 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
59291 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
59292 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
59293 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
59294 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
59295 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
59296 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
59297 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
59298 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
59299 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
59300 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
59301 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
59302 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
59303 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
59304 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
59305 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
59306 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
59307 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
59308 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
59309 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
59310 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
59311 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
59312 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
59313 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
59314 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
59315 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
59316 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
59317 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
59318 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
59319 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
59320 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
59321 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
59322 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
59323 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
59324 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
59325 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
59326 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
59327 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
59328 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
59329 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
59330 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
59331 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
59332 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
59333 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
59334 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
59335 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
59336 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
59337 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
59338 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
59339 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
59340 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
59341 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
59342 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
59343 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
59344 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
59345 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
59346 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
59347 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
59348 +#define GR_NICE_CHROOT_MSG "denied priority change by "
59349 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
59350 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
59351 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
59352 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
59353 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
59354 +#define GR_TIME_MSG "time set by "
59355 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
59356 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
59357 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
59358 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
59359 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
59360 +#define GR_BIND_MSG "denied bind() by "
59361 +#define GR_CONNECT_MSG "denied connect() by "
59362 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
59363 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
59364 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
59365 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
59366 +#define GR_CAP_ACL_MSG "use of %s denied for "
59367 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
59368 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
59369 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
59370 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
59371 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
59372 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
59373 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
59374 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
59375 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
59376 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
59377 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
59378 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
59379 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
59380 +#define GR_VM86_MSG "denied use of vm86 by "
59381 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
59382 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
59383 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
59384 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
59385 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
59386 new file mode 100644
59387 index 0000000..2ccf677
59388 --- /dev/null
59389 +++ b/include/linux/grsecurity.h
59390 @@ -0,0 +1,229 @@
59391 +#ifndef GR_SECURITY_H
59392 +#define GR_SECURITY_H
59393 +#include <linux/fs.h>
59394 +#include <linux/fs_struct.h>
59395 +#include <linux/binfmts.h>
59396 +#include <linux/gracl.h>
59397 +
59398 +/* notify of brain-dead configs */
59399 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59400 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59401 +#endif
59402 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59403 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59404 +#endif
59405 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59406 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59407 +#endif
59408 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59409 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
59410 +#endif
59411 +
59412 +#include <linux/compat.h>
59413 +
59414 +struct user_arg_ptr {
59415 +#ifdef CONFIG_COMPAT
59416 + bool is_compat;
59417 +#endif
59418 + union {
59419 + const char __user *const __user *native;
59420 +#ifdef CONFIG_COMPAT
59421 + compat_uptr_t __user *compat;
59422 +#endif
59423 + } ptr;
59424 +};
59425 +
59426 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59427 +void gr_handle_brute_check(void);
59428 +void gr_handle_kernel_exploit(void);
59429 +int gr_process_user_ban(void);
59430 +
59431 +char gr_roletype_to_char(void);
59432 +
59433 +int gr_acl_enable_at_secure(void);
59434 +
59435 +int gr_check_user_change(int real, int effective, int fs);
59436 +int gr_check_group_change(int real, int effective, int fs);
59437 +
59438 +void gr_del_task_from_ip_table(struct task_struct *p);
59439 +
59440 +int gr_pid_is_chrooted(struct task_struct *p);
59441 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59442 +int gr_handle_chroot_nice(void);
59443 +int gr_handle_chroot_sysctl(const int op);
59444 +int gr_handle_chroot_setpriority(struct task_struct *p,
59445 + const int niceval);
59446 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59447 +int gr_handle_chroot_chroot(const struct dentry *dentry,
59448 + const struct vfsmount *mnt);
59449 +void gr_handle_chroot_chdir(struct path *path);
59450 +int gr_handle_chroot_chmod(const struct dentry *dentry,
59451 + const struct vfsmount *mnt, const int mode);
59452 +int gr_handle_chroot_mknod(const struct dentry *dentry,
59453 + const struct vfsmount *mnt, const int mode);
59454 +int gr_handle_chroot_mount(const struct dentry *dentry,
59455 + const struct vfsmount *mnt,
59456 + const char *dev_name);
59457 +int gr_handle_chroot_pivot(void);
59458 +int gr_handle_chroot_unix(const pid_t pid);
59459 +
59460 +int gr_handle_rawio(const struct inode *inode);
59461 +
59462 +void gr_handle_ioperm(void);
59463 +void gr_handle_iopl(void);
59464 +
59465 +umode_t gr_acl_umask(void);
59466 +
59467 +int gr_tpe_allow(const struct file *file);
59468 +
59469 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59470 +void gr_clear_chroot_entries(struct task_struct *task);
59471 +
59472 +void gr_log_forkfail(const int retval);
59473 +void gr_log_timechange(void);
59474 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59475 +void gr_log_chdir(const struct dentry *dentry,
59476 + const struct vfsmount *mnt);
59477 +void gr_log_chroot_exec(const struct dentry *dentry,
59478 + const struct vfsmount *mnt);
59479 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59480 +void gr_log_remount(const char *devname, const int retval);
59481 +void gr_log_unmount(const char *devname, const int retval);
59482 +void gr_log_mount(const char *from, const char *to, const int retval);
59483 +void gr_log_textrel(struct vm_area_struct *vma);
59484 +void gr_log_rwxmmap(struct file *file);
59485 +void gr_log_rwxmprotect(struct file *file);
59486 +
59487 +int gr_handle_follow_link(const struct inode *parent,
59488 + const struct inode *inode,
59489 + const struct dentry *dentry,
59490 + const struct vfsmount *mnt);
59491 +int gr_handle_fifo(const struct dentry *dentry,
59492 + const struct vfsmount *mnt,
59493 + const struct dentry *dir, const int flag,
59494 + const int acc_mode);
59495 +int gr_handle_hardlink(const struct dentry *dentry,
59496 + const struct vfsmount *mnt,
59497 + struct inode *inode,
59498 + const int mode, const char *to);
59499 +
59500 +int gr_is_capable(const int cap);
59501 +int gr_is_capable_nolog(const int cap);
59502 +void gr_learn_resource(const struct task_struct *task, const int limit,
59503 + const unsigned long wanted, const int gt);
59504 +void gr_copy_label(struct task_struct *tsk);
59505 +void gr_handle_crash(struct task_struct *task, const int sig);
59506 +int gr_handle_signal(const struct task_struct *p, const int sig);
59507 +int gr_check_crash_uid(const uid_t uid);
59508 +int gr_check_protected_task(const struct task_struct *task);
59509 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59510 +int gr_acl_handle_mmap(const struct file *file,
59511 + const unsigned long prot);
59512 +int gr_acl_handle_mprotect(const struct file *file,
59513 + const unsigned long prot);
59514 +int gr_check_hidden_task(const struct task_struct *tsk);
59515 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59516 + const struct vfsmount *mnt);
59517 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
59518 + const struct vfsmount *mnt);
59519 +__u32 gr_acl_handle_access(const struct dentry *dentry,
59520 + const struct vfsmount *mnt, const int fmode);
59521 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59522 + const struct vfsmount *mnt, umode_t *mode);
59523 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
59524 + const struct vfsmount *mnt);
59525 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59526 + const struct vfsmount *mnt);
59527 +int gr_handle_ptrace(struct task_struct *task, const long request);
59528 +int gr_handle_proc_ptrace(struct task_struct *task);
59529 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
59530 + const struct vfsmount *mnt);
59531 +int gr_check_crash_exec(const struct file *filp);
59532 +int gr_acl_is_enabled(void);
59533 +void gr_set_kernel_label(struct task_struct *task);
59534 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
59535 + const gid_t gid);
59536 +int gr_set_proc_label(const struct dentry *dentry,
59537 + const struct vfsmount *mnt,
59538 + const int unsafe_flags);
59539 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59540 + const struct vfsmount *mnt);
59541 +__u32 gr_acl_handle_open(const struct dentry *dentry,
59542 + const struct vfsmount *mnt, int acc_mode);
59543 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
59544 + const struct dentry *p_dentry,
59545 + const struct vfsmount *p_mnt,
59546 + int open_flags, int acc_mode, const int imode);
59547 +void gr_handle_create(const struct dentry *dentry,
59548 + const struct vfsmount *mnt);
59549 +void gr_handle_proc_create(const struct dentry *dentry,
59550 + const struct inode *inode);
59551 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59552 + const struct dentry *parent_dentry,
59553 + const struct vfsmount *parent_mnt,
59554 + const int mode);
59555 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59556 + const struct dentry *parent_dentry,
59557 + const struct vfsmount *parent_mnt);
59558 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59559 + const struct vfsmount *mnt);
59560 +void gr_handle_delete(const ino_t ino, const dev_t dev);
59561 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59562 + const struct vfsmount *mnt);
59563 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59564 + const struct dentry *parent_dentry,
59565 + const struct vfsmount *parent_mnt,
59566 + const char *from);
59567 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59568 + const struct dentry *parent_dentry,
59569 + const struct vfsmount *parent_mnt,
59570 + const struct dentry *old_dentry,
59571 + const struct vfsmount *old_mnt, const char *to);
59572 +int gr_acl_handle_rename(struct dentry *new_dentry,
59573 + struct dentry *parent_dentry,
59574 + const struct vfsmount *parent_mnt,
59575 + struct dentry *old_dentry,
59576 + struct inode *old_parent_inode,
59577 + struct vfsmount *old_mnt, const char *newname);
59578 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59579 + struct dentry *old_dentry,
59580 + struct dentry *new_dentry,
59581 + struct vfsmount *mnt, const __u8 replace);
59582 +__u32 gr_check_link(const struct dentry *new_dentry,
59583 + const struct dentry *parent_dentry,
59584 + const struct vfsmount *parent_mnt,
59585 + const struct dentry *old_dentry,
59586 + const struct vfsmount *old_mnt);
59587 +int gr_acl_handle_filldir(const struct file *file, const char *name,
59588 + const unsigned int namelen, const ino_t ino);
59589 +
59590 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
59591 + const struct vfsmount *mnt);
59592 +void gr_acl_handle_exit(void);
59593 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
59594 +int gr_acl_handle_procpidmem(const struct task_struct *task);
59595 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59596 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59597 +void gr_audit_ptrace(struct task_struct *task);
59598 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59599 +
59600 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
59601 +
59602 +#ifdef CONFIG_GRKERNSEC
59603 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59604 +void gr_handle_vm86(void);
59605 +void gr_handle_mem_readwrite(u64 from, u64 to);
59606 +
59607 +void gr_log_badprocpid(const char *entry);
59608 +
59609 +extern int grsec_enable_dmesg;
59610 +extern int grsec_disable_privio;
59611 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59612 +extern int grsec_enable_chroot_findtask;
59613 +#endif
59614 +#ifdef CONFIG_GRKERNSEC_SETXID
59615 +extern int grsec_enable_setxid;
59616 +#endif
59617 +#endif
59618 +
59619 +#endif
59620 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
59621 new file mode 100644
59622 index 0000000..e7ffaaf
59623 --- /dev/null
59624 +++ b/include/linux/grsock.h
59625 @@ -0,0 +1,19 @@
59626 +#ifndef __GRSOCK_H
59627 +#define __GRSOCK_H
59628 +
59629 +extern void gr_attach_curr_ip(const struct sock *sk);
59630 +extern int gr_handle_sock_all(const int family, const int type,
59631 + const int protocol);
59632 +extern int gr_handle_sock_server(const struct sockaddr *sck);
59633 +extern int gr_handle_sock_server_other(const struct sock *sck);
59634 +extern int gr_handle_sock_client(const struct sockaddr *sck);
59635 +extern int gr_search_connect(struct socket * sock,
59636 + struct sockaddr_in * addr);
59637 +extern int gr_search_bind(struct socket * sock,
59638 + struct sockaddr_in * addr);
59639 +extern int gr_search_listen(struct socket * sock);
59640 +extern int gr_search_accept(struct socket * sock);
59641 +extern int gr_search_socket(const int domain, const int type,
59642 + const int protocol);
59643 +
59644 +#endif
59645 diff --git a/include/linux/hid.h b/include/linux/hid.h
59646 index c235e4e..f0cf7a0 100644
59647 --- a/include/linux/hid.h
59648 +++ b/include/linux/hid.h
59649 @@ -679,7 +679,7 @@ struct hid_ll_driver {
59650 unsigned int code, int value);
59651
59652 int (*parse)(struct hid_device *hdev);
59653 -};
59654 +} __no_const;
59655
59656 #define PM_HINT_FULLON 1<<5
59657 #define PM_HINT_NORMAL 1<<1
59658 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
59659 index 3a93f73..b19d0b3 100644
59660 --- a/include/linux/highmem.h
59661 +++ b/include/linux/highmem.h
59662 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
59663 kunmap_atomic(kaddr, KM_USER0);
59664 }
59665
59666 +static inline void sanitize_highpage(struct page *page)
59667 +{
59668 + void *kaddr;
59669 + unsigned long flags;
59670 +
59671 + local_irq_save(flags);
59672 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
59673 + clear_page(kaddr);
59674 + kunmap_atomic(kaddr, KM_CLEARPAGE);
59675 + local_irq_restore(flags);
59676 +}
59677 +
59678 static inline void zero_user_segments(struct page *page,
59679 unsigned start1, unsigned end1,
59680 unsigned start2, unsigned end2)
59681 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
59682 index 07d103a..04ec65b 100644
59683 --- a/include/linux/i2c.h
59684 +++ b/include/linux/i2c.h
59685 @@ -364,6 +364,7 @@ struct i2c_algorithm {
59686 /* To determine what the adapter supports */
59687 u32 (*functionality) (struct i2c_adapter *);
59688 };
59689 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59690
59691 /*
59692 * i2c_adapter is the structure used to identify a physical i2c bus along
59693 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
59694 index a6deef4..c56a7f2 100644
59695 --- a/include/linux/i2o.h
59696 +++ b/include/linux/i2o.h
59697 @@ -564,7 +564,7 @@ struct i2o_controller {
59698 struct i2o_device *exec; /* Executive */
59699 #if BITS_PER_LONG == 64
59700 spinlock_t context_list_lock; /* lock for context_list */
59701 - atomic_t context_list_counter; /* needed for unique contexts */
59702 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59703 struct list_head context_list; /* list of context id's
59704 and pointers */
59705 #endif
59706 diff --git a/include/linux/init.h b/include/linux/init.h
59707 index 9146f39..885354d 100644
59708 --- a/include/linux/init.h
59709 +++ b/include/linux/init.h
59710 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
59711
59712 /* Each module must use one module_init(). */
59713 #define module_init(initfn) \
59714 - static inline initcall_t __inittest(void) \
59715 + static inline __used initcall_t __inittest(void) \
59716 { return initfn; } \
59717 int init_module(void) __attribute__((alias(#initfn)));
59718
59719 /* This is only required if you want to be unloadable. */
59720 #define module_exit(exitfn) \
59721 - static inline exitcall_t __exittest(void) \
59722 + static inline __used exitcall_t __exittest(void) \
59723 { return exitfn; } \
59724 void cleanup_module(void) __attribute__((alias(#exitfn)));
59725
59726 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
59727 index 32574ee..00d4ef1 100644
59728 --- a/include/linux/init_task.h
59729 +++ b/include/linux/init_task.h
59730 @@ -128,6 +128,12 @@ extern struct cred init_cred;
59731
59732 #define INIT_TASK_COMM "swapper"
59733
59734 +#ifdef CONFIG_X86
59735 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59736 +#else
59737 +#define INIT_TASK_THREAD_INFO
59738 +#endif
59739 +
59740 /*
59741 * INIT_TASK is used to set up the first task table, touch at
59742 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59743 @@ -166,6 +172,7 @@ extern struct cred init_cred;
59744 RCU_INIT_POINTER(.cred, &init_cred), \
59745 .comm = INIT_TASK_COMM, \
59746 .thread = INIT_THREAD, \
59747 + INIT_TASK_THREAD_INFO \
59748 .fs = &init_fs, \
59749 .files = &init_files, \
59750 .signal = &init_signals, \
59751 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
59752 index e6ca56d..8583707 100644
59753 --- a/include/linux/intel-iommu.h
59754 +++ b/include/linux/intel-iommu.h
59755 @@ -296,7 +296,7 @@ struct iommu_flush {
59756 u8 fm, u64 type);
59757 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59758 unsigned int size_order, u64 type);
59759 -};
59760 +} __no_const;
59761
59762 enum {
59763 SR_DMAR_FECTL_REG,
59764 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
59765 index a64b00e..464d8bc 100644
59766 --- a/include/linux/interrupt.h
59767 +++ b/include/linux/interrupt.h
59768 @@ -441,7 +441,7 @@ enum
59769 /* map softirq index to softirq name. update 'softirq_to_name' in
59770 * kernel/softirq.c when adding a new softirq.
59771 */
59772 -extern char *softirq_to_name[NR_SOFTIRQS];
59773 +extern const char * const softirq_to_name[NR_SOFTIRQS];
59774
59775 /* softirq mask and active fields moved to irq_cpustat_t in
59776 * asm/hardirq.h to get better cache usage. KAO
59777 @@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
59778
59779 struct softirq_action
59780 {
59781 - void (*action)(struct softirq_action *);
59782 + void (*action)(void);
59783 };
59784
59785 asmlinkage void do_softirq(void);
59786 asmlinkage void __do_softirq(void);
59787 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59788 +extern void open_softirq(int nr, void (*action)(void));
59789 extern void softirq_init(void);
59790 static inline void __raise_softirq_irqoff(unsigned int nr)
59791 {
59792 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
59793 index 3875719..4cd454c 100644
59794 --- a/include/linux/kallsyms.h
59795 +++ b/include/linux/kallsyms.h
59796 @@ -15,7 +15,8 @@
59797
59798 struct module;
59799
59800 -#ifdef CONFIG_KALLSYMS
59801 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59802 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59803 /* Lookup the address for a symbol. Returns 0 if not found. */
59804 unsigned long kallsyms_lookup_name(const char *name);
59805
59806 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
59807 /* Stupid that this does nothing, but I didn't create this mess. */
59808 #define __print_symbol(fmt, addr)
59809 #endif /*CONFIG_KALLSYMS*/
59810 +#else /* when included by kallsyms.c, vsnprintf.c, or
59811 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59812 +extern void __print_symbol(const char *fmt, unsigned long address);
59813 +extern int sprint_backtrace(char *buffer, unsigned long address);
59814 +extern int sprint_symbol(char *buffer, unsigned long address);
59815 +const char *kallsyms_lookup(unsigned long addr,
59816 + unsigned long *symbolsize,
59817 + unsigned long *offset,
59818 + char **modname, char *namebuf);
59819 +#endif
59820
59821 /* This macro allows us to keep printk typechecking */
59822 static __printf(1, 2)
59823 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
59824 index fa39183..40160be 100644
59825 --- a/include/linux/kgdb.h
59826 +++ b/include/linux/kgdb.h
59827 @@ -53,7 +53,7 @@ extern int kgdb_connected;
59828 extern int kgdb_io_module_registered;
59829
59830 extern atomic_t kgdb_setting_breakpoint;
59831 -extern atomic_t kgdb_cpu_doing_single_step;
59832 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59833
59834 extern struct task_struct *kgdb_usethread;
59835 extern struct task_struct *kgdb_contthread;
59836 @@ -251,7 +251,7 @@ struct kgdb_arch {
59837 void (*disable_hw_break)(struct pt_regs *regs);
59838 void (*remove_all_hw_break)(void);
59839 void (*correct_hw_break)(void);
59840 -};
59841 +} __do_const;
59842
59843 /**
59844 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59845 @@ -276,7 +276,7 @@ struct kgdb_io {
59846 void (*pre_exception) (void);
59847 void (*post_exception) (void);
59848 int is_console;
59849 -};
59850 +} __do_const;
59851
59852 extern struct kgdb_arch arch_kgdb_ops;
59853
59854 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
59855 index b16f653..eb908f4 100644
59856 --- a/include/linux/kmod.h
59857 +++ b/include/linux/kmod.h
59858 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
59859 * usually useless though. */
59860 extern __printf(2, 3)
59861 int __request_module(bool wait, const char *name, ...);
59862 +extern __printf(3, 4)
59863 +int ___request_module(bool wait, char *param_name, const char *name, ...);
59864 #define request_module(mod...) __request_module(true, mod)
59865 #define request_module_nowait(mod...) __request_module(false, mod)
59866 #define try_then_request_module(x, mod...) \
59867 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
59868 index d526231..086e89b 100644
59869 --- a/include/linux/kvm_host.h
59870 +++ b/include/linux/kvm_host.h
59871 @@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
59872 void vcpu_load(struct kvm_vcpu *vcpu);
59873 void vcpu_put(struct kvm_vcpu *vcpu);
59874
59875 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59876 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59877 struct module *module);
59878 void kvm_exit(void);
59879
59880 @@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
59881 struct kvm_guest_debug *dbg);
59882 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59883
59884 -int kvm_arch_init(void *opaque);
59885 +int kvm_arch_init(const void *opaque);
59886 void kvm_arch_exit(void);
59887
59888 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59889 diff --git a/include/linux/libata.h b/include/linux/libata.h
59890 index cafc09a..d7e7829 100644
59891 --- a/include/linux/libata.h
59892 +++ b/include/linux/libata.h
59893 @@ -909,7 +909,7 @@ struct ata_port_operations {
59894 * fields must be pointers.
59895 */
59896 const struct ata_port_operations *inherits;
59897 -};
59898 +} __do_const;
59899
59900 struct ata_port_info {
59901 unsigned long flags;
59902 diff --git a/include/linux/mca.h b/include/linux/mca.h
59903 index 3797270..7765ede 100644
59904 --- a/include/linux/mca.h
59905 +++ b/include/linux/mca.h
59906 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59907 int region);
59908 void * (*mca_transform_memory)(struct mca_device *,
59909 void *memory);
59910 -};
59911 +} __no_const;
59912
59913 struct mca_bus {
59914 u64 default_dma_mask;
59915 diff --git a/include/linux/memory.h b/include/linux/memory.h
59916 index 935699b..11042cc 100644
59917 --- a/include/linux/memory.h
59918 +++ b/include/linux/memory.h
59919 @@ -144,7 +144,7 @@ struct memory_accessor {
59920 size_t count);
59921 ssize_t (*write)(struct memory_accessor *, const char *buf,
59922 off_t offset, size_t count);
59923 -};
59924 +} __no_const;
59925
59926 /*
59927 * Kernel text modification mutex, used for code patching. Users of this lock
59928 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
59929 index 9970337..9444122 100644
59930 --- a/include/linux/mfd/abx500.h
59931 +++ b/include/linux/mfd/abx500.h
59932 @@ -188,6 +188,7 @@ struct abx500_ops {
59933 int (*event_registers_startup_state_get) (struct device *, u8 *);
59934 int (*startup_irq_enabled) (struct device *, unsigned int);
59935 };
59936 +typedef struct abx500_ops __no_const abx500_ops_no_const;
59937
59938 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59939 void abx500_remove_ops(struct device *dev);
59940 diff --git a/include/linux/mm.h b/include/linux/mm.h
59941 index 4baadd1..2e0b45e 100644
59942 --- a/include/linux/mm.h
59943 +++ b/include/linux/mm.h
59944 @@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
59945
59946 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59947 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59948 +
59949 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59950 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59951 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59952 +#else
59953 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59954 +#endif
59955 +
59956 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59957 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59958
59959 @@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
59960 int set_page_dirty_lock(struct page *page);
59961 int clear_page_dirty_for_io(struct page *page);
59962
59963 -/* Is the vma a continuation of the stack vma above it? */
59964 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59965 -{
59966 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59967 -}
59968 -
59969 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
59970 - unsigned long addr)
59971 -{
59972 - return (vma->vm_flags & VM_GROWSDOWN) &&
59973 - (vma->vm_start == addr) &&
59974 - !vma_growsdown(vma->vm_prev, addr);
59975 -}
59976 -
59977 -/* Is the vma a continuation of the stack vma below it? */
59978 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59979 -{
59980 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59981 -}
59982 -
59983 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
59984 - unsigned long addr)
59985 -{
59986 - return (vma->vm_flags & VM_GROWSUP) &&
59987 - (vma->vm_end == addr) &&
59988 - !vma_growsup(vma->vm_next, addr);
59989 -}
59990 -
59991 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59992 unsigned long old_addr, struct vm_area_struct *new_vma,
59993 unsigned long new_addr, unsigned long len);
59994 @@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
59995 }
59996 #endif
59997
59998 +#ifdef CONFIG_MMU
59999 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
60000 +#else
60001 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
60002 +{
60003 + return __pgprot(0);
60004 +}
60005 +#endif
60006 +
60007 int vma_wants_writenotify(struct vm_area_struct *vma);
60008
60009 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
60010 @@ -1419,6 +1407,7 @@ out:
60011 }
60012
60013 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
60014 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
60015
60016 extern unsigned long do_brk(unsigned long, unsigned long);
60017
60018 @@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
60019 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
60020 struct vm_area_struct **pprev);
60021
60022 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
60023 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
60024 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
60025 +
60026 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
60027 NULL if none. Assume start_addr < end_addr. */
60028 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
60029 @@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
60030 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
60031 }
60032
60033 -#ifdef CONFIG_MMU
60034 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
60035 -#else
60036 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
60037 -{
60038 - return __pgprot(0);
60039 -}
60040 -#endif
60041 -
60042 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
60043 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
60044 unsigned long pfn, unsigned long size, pgprot_t);
60045 @@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn);
60046 extern int sysctl_memory_failure_early_kill;
60047 extern int sysctl_memory_failure_recovery;
60048 extern void shake_page(struct page *p, int access);
60049 -extern atomic_long_t mce_bad_pages;
60050 +extern atomic_long_unchecked_t mce_bad_pages;
60051 extern int soft_offline_page(struct page *page, int flags);
60052
60053 extern void dump_page(struct page *page);
60054 @@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
60055 unsigned int pages_per_huge_page);
60056 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
60057
60058 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
60059 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
60060 +#else
60061 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
60062 +#endif
60063 +
60064 #endif /* __KERNEL__ */
60065 #endif /* _LINUX_MM_H */
60066 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
60067 index 5b42f1b..759e4b4 100644
60068 --- a/include/linux/mm_types.h
60069 +++ b/include/linux/mm_types.h
60070 @@ -253,6 +253,8 @@ struct vm_area_struct {
60071 #ifdef CONFIG_NUMA
60072 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
60073 #endif
60074 +
60075 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
60076 };
60077
60078 struct core_thread {
60079 @@ -389,6 +391,24 @@ struct mm_struct {
60080 #ifdef CONFIG_CPUMASK_OFFSTACK
60081 struct cpumask cpumask_allocation;
60082 #endif
60083 +
60084 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60085 + unsigned long pax_flags;
60086 +#endif
60087 +
60088 +#ifdef CONFIG_PAX_DLRESOLVE
60089 + unsigned long call_dl_resolve;
60090 +#endif
60091 +
60092 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
60093 + unsigned long call_syscall;
60094 +#endif
60095 +
60096 +#ifdef CONFIG_PAX_ASLR
60097 + unsigned long delta_mmap; /* randomized offset */
60098 + unsigned long delta_stack; /* randomized offset */
60099 +#endif
60100 +
60101 };
60102
60103 static inline void mm_init_cpumask(struct mm_struct *mm)
60104 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
60105 index 1d1b1e1..2a13c78 100644
60106 --- a/include/linux/mmu_notifier.h
60107 +++ b/include/linux/mmu_notifier.h
60108 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
60109 */
60110 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
60111 ({ \
60112 - pte_t __pte; \
60113 + pte_t ___pte; \
60114 struct vm_area_struct *___vma = __vma; \
60115 unsigned long ___address = __address; \
60116 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
60117 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
60118 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
60119 - __pte; \
60120 + ___pte; \
60121 })
60122
60123 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
60124 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
60125 index 188cb2f..d78409b 100644
60126 --- a/include/linux/mmzone.h
60127 +++ b/include/linux/mmzone.h
60128 @@ -369,7 +369,7 @@ struct zone {
60129 unsigned long flags; /* zone flags, see below */
60130
60131 /* Zone statistics */
60132 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60133 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60134
60135 /*
60136 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
60137 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
60138 index 468819c..17b9db3 100644
60139 --- a/include/linux/mod_devicetable.h
60140 +++ b/include/linux/mod_devicetable.h
60141 @@ -12,7 +12,7 @@
60142 typedef unsigned long kernel_ulong_t;
60143 #endif
60144
60145 -#define PCI_ANY_ID (~0)
60146 +#define PCI_ANY_ID ((__u16)~0)
60147
60148 struct pci_device_id {
60149 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
60150 @@ -131,7 +131,7 @@ struct usb_device_id {
60151 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
60152 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
60153
60154 -#define HID_ANY_ID (~0)
60155 +#define HID_ANY_ID (~0U)
60156
60157 struct hid_device_id {
60158 __u16 bus;
60159 diff --git a/include/linux/module.h b/include/linux/module.h
60160 index 3cb7839..511cb87 100644
60161 --- a/include/linux/module.h
60162 +++ b/include/linux/module.h
60163 @@ -17,6 +17,7 @@
60164 #include <linux/moduleparam.h>
60165 #include <linux/tracepoint.h>
60166 #include <linux/export.h>
60167 +#include <linux/fs.h>
60168
60169 #include <linux/percpu.h>
60170 #include <asm/module.h>
60171 @@ -261,19 +262,16 @@ struct module
60172 int (*init)(void);
60173
60174 /* If this is non-NULL, vfree after init() returns */
60175 - void *module_init;
60176 + void *module_init_rx, *module_init_rw;
60177
60178 /* Here is the actual code + data, vfree'd on unload. */
60179 - void *module_core;
60180 + void *module_core_rx, *module_core_rw;
60181
60182 /* Here are the sizes of the init and core sections */
60183 - unsigned int init_size, core_size;
60184 + unsigned int init_size_rw, core_size_rw;
60185
60186 /* The size of the executable code in each section. */
60187 - unsigned int init_text_size, core_text_size;
60188 -
60189 - /* Size of RO sections of the module (text+rodata) */
60190 - unsigned int init_ro_size, core_ro_size;
60191 + unsigned int init_size_rx, core_size_rx;
60192
60193 /* Arch-specific module values */
60194 struct mod_arch_specific arch;
60195 @@ -329,6 +327,10 @@ struct module
60196 #ifdef CONFIG_EVENT_TRACING
60197 struct ftrace_event_call **trace_events;
60198 unsigned int num_trace_events;
60199 + struct file_operations trace_id;
60200 + struct file_operations trace_enable;
60201 + struct file_operations trace_format;
60202 + struct file_operations trace_filter;
60203 #endif
60204 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
60205 unsigned int num_ftrace_callsites;
60206 @@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr);
60207 bool is_module_percpu_address(unsigned long addr);
60208 bool is_module_text_address(unsigned long addr);
60209
60210 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
60211 +{
60212 +
60213 +#ifdef CONFIG_PAX_KERNEXEC
60214 + if (ktla_ktva(addr) >= (unsigned long)start &&
60215 + ktla_ktva(addr) < (unsigned long)start + size)
60216 + return 1;
60217 +#endif
60218 +
60219 + return ((void *)addr >= start && (void *)addr < start + size);
60220 +}
60221 +
60222 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
60223 +{
60224 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
60225 +}
60226 +
60227 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
60228 +{
60229 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
60230 +}
60231 +
60232 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
60233 +{
60234 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
60235 +}
60236 +
60237 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
60238 +{
60239 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
60240 +}
60241 +
60242 static inline int within_module_core(unsigned long addr, struct module *mod)
60243 {
60244 - return (unsigned long)mod->module_core <= addr &&
60245 - addr < (unsigned long)mod->module_core + mod->core_size;
60246 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
60247 }
60248
60249 static inline int within_module_init(unsigned long addr, struct module *mod)
60250 {
60251 - return (unsigned long)mod->module_init <= addr &&
60252 - addr < (unsigned long)mod->module_init + mod->init_size;
60253 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
60254 }
60255
60256 /* Search for module by name: must hold module_mutex. */
60257 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
60258 index b2be02e..6a9fdb1 100644
60259 --- a/include/linux/moduleloader.h
60260 +++ b/include/linux/moduleloader.h
60261 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
60262 sections. Returns NULL on failure. */
60263 void *module_alloc(unsigned long size);
60264
60265 +#ifdef CONFIG_PAX_KERNEXEC
60266 +void *module_alloc_exec(unsigned long size);
60267 +#else
60268 +#define module_alloc_exec(x) module_alloc(x)
60269 +#endif
60270 +
60271 /* Free memory returned from module_alloc. */
60272 void module_free(struct module *mod, void *module_region);
60273
60274 +#ifdef CONFIG_PAX_KERNEXEC
60275 +void module_free_exec(struct module *mod, void *module_region);
60276 +#else
60277 +#define module_free_exec(x, y) module_free((x), (y))
60278 +#endif
60279 +
60280 /* Apply the given relocation to the (simplified) ELF. Return -error
60281 or 0. */
60282 int apply_relocate(Elf_Shdr *sechdrs,
60283 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
60284 index 7939f63..ec6df57 100644
60285 --- a/include/linux/moduleparam.h
60286 +++ b/include/linux/moduleparam.h
60287 @@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
60288 * @len is usually just sizeof(string).
60289 */
60290 #define module_param_string(name, string, len, perm) \
60291 - static const struct kparam_string __param_string_##name \
60292 + static const struct kparam_string __param_string_##name __used \
60293 = { len, string }; \
60294 __module_param_call(MODULE_PARAM_PREFIX, name, \
60295 &param_ops_string, \
60296 @@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
60297 * module_param_named() for why this might be necessary.
60298 */
60299 #define module_param_array_named(name, array, type, nump, perm) \
60300 - static const struct kparam_array __param_arr_##name \
60301 + static const struct kparam_array __param_arr_##name __used \
60302 = { .max = ARRAY_SIZE(array), .num = nump, \
60303 .ops = &param_ops_##type, \
60304 .elemsize = sizeof(array[0]), .elem = array }; \
60305 diff --git a/include/linux/namei.h b/include/linux/namei.h
60306 index ffc0213..2c1f2cb 100644
60307 --- a/include/linux/namei.h
60308 +++ b/include/linux/namei.h
60309 @@ -24,7 +24,7 @@ struct nameidata {
60310 unsigned seq;
60311 int last_type;
60312 unsigned depth;
60313 - char *saved_names[MAX_NESTED_LINKS + 1];
60314 + const char *saved_names[MAX_NESTED_LINKS + 1];
60315
60316 /* Intent data */
60317 union {
60318 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
60319 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
60320 extern void unlock_rename(struct dentry *, struct dentry *);
60321
60322 -static inline void nd_set_link(struct nameidata *nd, char *path)
60323 +static inline void nd_set_link(struct nameidata *nd, const char *path)
60324 {
60325 nd->saved_names[nd->depth] = path;
60326 }
60327
60328 -static inline char *nd_get_link(struct nameidata *nd)
60329 +static inline const char *nd_get_link(const struct nameidata *nd)
60330 {
60331 return nd->saved_names[nd->depth];
60332 }
60333 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
60334 index a82ad4d..90d15b7 100644
60335 --- a/include/linux/netdevice.h
60336 +++ b/include/linux/netdevice.h
60337 @@ -949,6 +949,7 @@ struct net_device_ops {
60338 int (*ndo_set_features)(struct net_device *dev,
60339 u32 features);
60340 };
60341 +typedef struct net_device_ops __no_const net_device_ops_no_const;
60342
60343 /*
60344 * The DEVICE structure.
60345 @@ -1088,7 +1089,7 @@ struct net_device {
60346 int iflink;
60347
60348 struct net_device_stats stats;
60349 - atomic_long_t rx_dropped; /* dropped packets by core network
60350 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
60351 * Do not use this in drivers.
60352 */
60353
60354 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
60355 new file mode 100644
60356 index 0000000..33f4af8
60357 --- /dev/null
60358 +++ b/include/linux/netfilter/xt_gradm.h
60359 @@ -0,0 +1,9 @@
60360 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
60361 +#define _LINUX_NETFILTER_XT_GRADM_H 1
60362 +
60363 +struct xt_gradm_mtinfo {
60364 + __u16 flags;
60365 + __u16 invflags;
60366 +};
60367 +
60368 +#endif
60369 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
60370 index c65a18a..0c05f3a 100644
60371 --- a/include/linux/of_pdt.h
60372 +++ b/include/linux/of_pdt.h
60373 @@ -32,7 +32,7 @@ struct of_pdt_ops {
60374
60375 /* return 0 on success; fill in 'len' with number of bytes in path */
60376 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
60377 -};
60378 +} __no_const;
60379
60380 extern void *prom_early_alloc(unsigned long size);
60381
60382 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
60383 index a4c5624..79d6d88 100644
60384 --- a/include/linux/oprofile.h
60385 +++ b/include/linux/oprofile.h
60386 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
60387 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
60388 char const * name, ulong * val);
60389
60390 -/** Create a file for read-only access to an atomic_t. */
60391 +/** Create a file for read-only access to an atomic_unchecked_t. */
60392 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
60393 - char const * name, atomic_t * val);
60394 + char const * name, atomic_unchecked_t * val);
60395
60396 /** create a directory */
60397 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60398 diff --git a/include/linux/padata.h b/include/linux/padata.h
60399 index 4633b2f..988bc08 100644
60400 --- a/include/linux/padata.h
60401 +++ b/include/linux/padata.h
60402 @@ -129,7 +129,7 @@ struct parallel_data {
60403 struct padata_instance *pinst;
60404 struct padata_parallel_queue __percpu *pqueue;
60405 struct padata_serial_queue __percpu *squeue;
60406 - atomic_t seq_nr;
60407 + atomic_unchecked_t seq_nr;
60408 atomic_t reorder_objects;
60409 atomic_t refcnt;
60410 unsigned int max_seq_nr;
60411 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
60412 index b1f8912..c955bff 100644
60413 --- a/include/linux/perf_event.h
60414 +++ b/include/linux/perf_event.h
60415 @@ -748,8 +748,8 @@ struct perf_event {
60416
60417 enum perf_event_active_state state;
60418 unsigned int attach_state;
60419 - local64_t count;
60420 - atomic64_t child_count;
60421 + local64_t count; /* PaX: fix it one day */
60422 + atomic64_unchecked_t child_count;
60423
60424 /*
60425 * These are the total time in nanoseconds that the event
60426 @@ -800,8 +800,8 @@ struct perf_event {
60427 * These accumulate total time (in nanoseconds) that children
60428 * events have been enabled and running, respectively.
60429 */
60430 - atomic64_t child_total_time_enabled;
60431 - atomic64_t child_total_time_running;
60432 + atomic64_unchecked_t child_total_time_enabled;
60433 + atomic64_unchecked_t child_total_time_running;
60434
60435 /*
60436 * Protect attach/detach and child_list:
60437 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
60438 index 77257c9..51d473a 100644
60439 --- a/include/linux/pipe_fs_i.h
60440 +++ b/include/linux/pipe_fs_i.h
60441 @@ -46,9 +46,9 @@ struct pipe_buffer {
60442 struct pipe_inode_info {
60443 wait_queue_head_t wait;
60444 unsigned int nrbufs, curbuf, buffers;
60445 - unsigned int readers;
60446 - unsigned int writers;
60447 - unsigned int waiting_writers;
60448 + atomic_t readers;
60449 + atomic_t writers;
60450 + atomic_t waiting_writers;
60451 unsigned int r_counter;
60452 unsigned int w_counter;
60453 struct page *tmp_page;
60454 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
60455 index d3085e7..fd01052 100644
60456 --- a/include/linux/pm_runtime.h
60457 +++ b/include/linux/pm_runtime.h
60458 @@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
60459
60460 static inline void pm_runtime_mark_last_busy(struct device *dev)
60461 {
60462 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
60463 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
60464 }
60465
60466 #else /* !CONFIG_PM_RUNTIME */
60467 diff --git a/include/linux/poison.h b/include/linux/poison.h
60468 index 79159de..f1233a9 100644
60469 --- a/include/linux/poison.h
60470 +++ b/include/linux/poison.h
60471 @@ -19,8 +19,8 @@
60472 * under normal circumstances, used to verify that nobody uses
60473 * non-initialized list entries.
60474 */
60475 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60476 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60477 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60478 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60479
60480 /********** include/linux/timer.h **********/
60481 /*
60482 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
60483 index 58969b2..ead129b 100644
60484 --- a/include/linux/preempt.h
60485 +++ b/include/linux/preempt.h
60486 @@ -123,7 +123,7 @@ struct preempt_ops {
60487 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60488 void (*sched_out)(struct preempt_notifier *notifier,
60489 struct task_struct *next);
60490 -};
60491 +} __no_const;
60492
60493 /**
60494 * preempt_notifier - key for installing preemption notifiers
60495 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
60496 index 643b96c..ef55a9c 100644
60497 --- a/include/linux/proc_fs.h
60498 +++ b/include/linux/proc_fs.h
60499 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
60500 return proc_create_data(name, mode, parent, proc_fops, NULL);
60501 }
60502
60503 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60504 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60505 +{
60506 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60507 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60508 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60509 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60510 +#else
60511 + return proc_create_data(name, mode, parent, proc_fops, NULL);
60512 +#endif
60513 +}
60514 +
60515 +
60516 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60517 mode_t mode, struct proc_dir_entry *base,
60518 read_proc_t *read_proc, void * data)
60519 @@ -258,7 +271,7 @@ union proc_op {
60520 int (*proc_show)(struct seq_file *m,
60521 struct pid_namespace *ns, struct pid *pid,
60522 struct task_struct *task);
60523 -};
60524 +} __no_const;
60525
60526 struct ctl_table_header;
60527 struct ctl_table;
60528 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
60529 index 800f113..e9ee2e3 100644
60530 --- a/include/linux/ptrace.h
60531 +++ b/include/linux/ptrace.h
60532 @@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child);
60533 extern void exit_ptrace(struct task_struct *tracer);
60534 #define PTRACE_MODE_READ 1
60535 #define PTRACE_MODE_ATTACH 2
60536 -/* Returns 0 on success, -errno on denial. */
60537 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60538 /* Returns true on success, false on denial. */
60539 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60540 +/* Returns true on success, false on denial. */
60541 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60542 +/* Returns true on success, false on denial. */
60543 +extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
60544
60545 static inline int ptrace_reparented(struct task_struct *child)
60546 {
60547 diff --git a/include/linux/random.h b/include/linux/random.h
60548 index 8f74538..02a1012 100644
60549 --- a/include/linux/random.h
60550 +++ b/include/linux/random.h
60551 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
60552
60553 u32 prandom32(struct rnd_state *);
60554
60555 +static inline unsigned long pax_get_random_long(void)
60556 +{
60557 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60558 +}
60559 +
60560 /*
60561 * Handle minimum values for seeds
60562 */
60563 static inline u32 __seed(u32 x, u32 m)
60564 {
60565 - return (x < m) ? x + m : x;
60566 + return (x <= m) ? x + m + 1 : x;
60567 }
60568
60569 /**
60570 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
60571 index e0879a7..a12f962 100644
60572 --- a/include/linux/reboot.h
60573 +++ b/include/linux/reboot.h
60574 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
60575 * Architecture-specific implementations of sys_reboot commands.
60576 */
60577
60578 -extern void machine_restart(char *cmd);
60579 -extern void machine_halt(void);
60580 -extern void machine_power_off(void);
60581 +extern void machine_restart(char *cmd) __noreturn;
60582 +extern void machine_halt(void) __noreturn;
60583 +extern void machine_power_off(void) __noreturn;
60584
60585 extern void machine_shutdown(void);
60586 struct pt_regs;
60587 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
60588 */
60589
60590 extern void kernel_restart_prepare(char *cmd);
60591 -extern void kernel_restart(char *cmd);
60592 -extern void kernel_halt(void);
60593 -extern void kernel_power_off(void);
60594 +extern void kernel_restart(char *cmd) __noreturn;
60595 +extern void kernel_halt(void) __noreturn;
60596 +extern void kernel_power_off(void) __noreturn;
60597
60598 extern int C_A_D; /* for sysctl */
60599 void ctrl_alt_del(void);
60600 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60601 * Emergency restart, callable from an interrupt handler.
60602 */
60603
60604 -extern void emergency_restart(void);
60605 +extern void emergency_restart(void) __noreturn;
60606 #include <asm/emergency-restart.h>
60607
60608 #endif
60609 diff --git a/include/linux/regset.h b/include/linux/regset.h
60610 index 8abee65..5150fd1 100644
60611 --- a/include/linux/regset.h
60612 +++ b/include/linux/regset.h
60613 @@ -335,6 +335,9 @@ static inline int copy_regset_to_user(struct task_struct *target,
60614 {
60615 const struct user_regset *regset = &view->regsets[setno];
60616
60617 + if (!regset->get)
60618 + return -EOPNOTSUPP;
60619 +
60620 if (!access_ok(VERIFY_WRITE, data, size))
60621 return -EIO;
60622
60623 @@ -358,6 +361,9 @@ static inline int copy_regset_from_user(struct task_struct *target,
60624 {
60625 const struct user_regset *regset = &view->regsets[setno];
60626
60627 + if (!regset->set)
60628 + return -EOPNOTSUPP;
60629 +
60630 if (!access_ok(VERIFY_READ, data, size))
60631 return -EIO;
60632
60633 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
60634 index 96d465f..b084e05 100644
60635 --- a/include/linux/reiserfs_fs.h
60636 +++ b/include/linux/reiserfs_fs.h
60637 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
60638 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60639
60640 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60641 -#define get_generation(s) atomic_read (&fs_generation(s))
60642 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60643 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60644 #define __fs_changed(gen,s) (gen != get_generation (s))
60645 #define fs_changed(gen,s) \
60646 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
60647 index 52c83b6..18ed7eb 100644
60648 --- a/include/linux/reiserfs_fs_sb.h
60649 +++ b/include/linux/reiserfs_fs_sb.h
60650 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60651 /* Comment? -Hans */
60652 wait_queue_head_t s_wait;
60653 /* To be obsoleted soon by per buffer seals.. -Hans */
60654 - atomic_t s_generation_counter; // increased by one every time the
60655 + atomic_unchecked_t s_generation_counter; // increased by one every time the
60656 // tree gets re-balanced
60657 unsigned long s_properties; /* File system properties. Currently holds
60658 on-disk FS format */
60659 diff --git a/include/linux/relay.h b/include/linux/relay.h
60660 index 14a86bc..17d0700 100644
60661 --- a/include/linux/relay.h
60662 +++ b/include/linux/relay.h
60663 @@ -159,7 +159,7 @@ struct rchan_callbacks
60664 * The callback should return 0 if successful, negative if not.
60665 */
60666 int (*remove_buf_file)(struct dentry *dentry);
60667 -};
60668 +} __no_const;
60669
60670 /*
60671 * CONFIG_RELAY kernel API, kernel/relay.c
60672 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
60673 index c6c6084..5bf1212 100644
60674 --- a/include/linux/rfkill.h
60675 +++ b/include/linux/rfkill.h
60676 @@ -147,6 +147,7 @@ struct rfkill_ops {
60677 void (*query)(struct rfkill *rfkill, void *data);
60678 int (*set_block)(void *data, bool blocked);
60679 };
60680 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60681
60682 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60683 /**
60684 diff --git a/include/linux/rio.h b/include/linux/rio.h
60685 index 4d50611..c6858a2 100644
60686 --- a/include/linux/rio.h
60687 +++ b/include/linux/rio.h
60688 @@ -315,7 +315,7 @@ struct rio_ops {
60689 int mbox, void *buffer, size_t len);
60690 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
60691 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
60692 -};
60693 +} __no_const;
60694
60695 #define RIO_RESOURCE_MEM 0x00000100
60696 #define RIO_RESOURCE_DOORBELL 0x00000200
60697 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
60698 index 2148b12..519b820 100644
60699 --- a/include/linux/rmap.h
60700 +++ b/include/linux/rmap.h
60701 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
60702 void anon_vma_init(void); /* create anon_vma_cachep */
60703 int anon_vma_prepare(struct vm_area_struct *);
60704 void unlink_anon_vmas(struct vm_area_struct *);
60705 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60706 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60707 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60708 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60709 void __anon_vma_link(struct vm_area_struct *);
60710
60711 static inline void anon_vma_merge(struct vm_area_struct *vma,
60712 diff --git a/include/linux/sched.h b/include/linux/sched.h
60713 index 1c4f3e9..b4e4851 100644
60714 --- a/include/linux/sched.h
60715 +++ b/include/linux/sched.h
60716 @@ -101,6 +101,7 @@ struct bio_list;
60717 struct fs_struct;
60718 struct perf_event_context;
60719 struct blk_plug;
60720 +struct linux_binprm;
60721
60722 /*
60723 * List of flags we want to share for kernel threads,
60724 @@ -380,10 +381,13 @@ struct user_namespace;
60725 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60726
60727 extern int sysctl_max_map_count;
60728 +extern unsigned long sysctl_heap_stack_gap;
60729
60730 #include <linux/aio.h>
60731
60732 #ifdef CONFIG_MMU
60733 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60734 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60735 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60736 extern unsigned long
60737 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60738 @@ -629,6 +633,17 @@ struct signal_struct {
60739 #ifdef CONFIG_TASKSTATS
60740 struct taskstats *stats;
60741 #endif
60742 +
60743 +#ifdef CONFIG_GRKERNSEC
60744 + u32 curr_ip;
60745 + u32 saved_ip;
60746 + u32 gr_saddr;
60747 + u32 gr_daddr;
60748 + u16 gr_sport;
60749 + u16 gr_dport;
60750 + u8 used_accept:1;
60751 +#endif
60752 +
60753 #ifdef CONFIG_AUDIT
60754 unsigned audit_tty;
60755 struct tty_audit_buf *tty_audit_buf;
60756 @@ -710,6 +725,11 @@ struct user_struct {
60757 struct key *session_keyring; /* UID's default session keyring */
60758 #endif
60759
60760 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60761 + unsigned int banned;
60762 + unsigned long ban_expires;
60763 +#endif
60764 +
60765 /* Hash table maintenance information */
60766 struct hlist_node uidhash_node;
60767 uid_t uid;
60768 @@ -1337,8 +1357,8 @@ struct task_struct {
60769 struct list_head thread_group;
60770
60771 struct completion *vfork_done; /* for vfork() */
60772 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60773 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60774 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60775 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60776
60777 cputime_t utime, stime, utimescaled, stimescaled;
60778 cputime_t gtime;
60779 @@ -1354,13 +1374,6 @@ struct task_struct {
60780 struct task_cputime cputime_expires;
60781 struct list_head cpu_timers[3];
60782
60783 -/* process credentials */
60784 - const struct cred __rcu *real_cred; /* objective and real subjective task
60785 - * credentials (COW) */
60786 - const struct cred __rcu *cred; /* effective (overridable) subjective task
60787 - * credentials (COW) */
60788 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60789 -
60790 char comm[TASK_COMM_LEN]; /* executable name excluding path
60791 - access with [gs]et_task_comm (which lock
60792 it with task_lock())
60793 @@ -1377,8 +1390,16 @@ struct task_struct {
60794 #endif
60795 /* CPU-specific state of this task */
60796 struct thread_struct thread;
60797 +/* thread_info moved to task_struct */
60798 +#ifdef CONFIG_X86
60799 + struct thread_info tinfo;
60800 +#endif
60801 /* filesystem information */
60802 struct fs_struct *fs;
60803 +
60804 + const struct cred __rcu *cred; /* effective (overridable) subjective task
60805 + * credentials (COW) */
60806 +
60807 /* open file information */
60808 struct files_struct *files;
60809 /* namespaces */
60810 @@ -1425,6 +1446,11 @@ struct task_struct {
60811 struct rt_mutex_waiter *pi_blocked_on;
60812 #endif
60813
60814 +/* process credentials */
60815 + const struct cred __rcu *real_cred; /* objective and real subjective task
60816 + * credentials (COW) */
60817 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60818 +
60819 #ifdef CONFIG_DEBUG_MUTEXES
60820 /* mutex deadlock detection */
60821 struct mutex_waiter *blocked_on;
60822 @@ -1540,6 +1566,27 @@ struct task_struct {
60823 unsigned long default_timer_slack_ns;
60824
60825 struct list_head *scm_work_list;
60826 +
60827 +#ifdef CONFIG_GRKERNSEC
60828 + /* grsecurity */
60829 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60830 + u64 exec_id;
60831 +#endif
60832 +#ifdef CONFIG_GRKERNSEC_SETXID
60833 + const struct cred *delayed_cred;
60834 +#endif
60835 + struct dentry *gr_chroot_dentry;
60836 + struct acl_subject_label *acl;
60837 + struct acl_role_label *role;
60838 + struct file *exec_file;
60839 + u16 acl_role_id;
60840 + /* is this the task that authenticated to the special role */
60841 + u8 acl_sp_role;
60842 + u8 is_writable;
60843 + u8 brute;
60844 + u8 gr_is_chrooted;
60845 +#endif
60846 +
60847 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60848 /* Index of current stored address in ret_stack */
60849 int curr_ret_stack;
60850 @@ -1574,6 +1621,51 @@ struct task_struct {
60851 #endif
60852 };
60853
60854 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60855 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60856 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60857 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60858 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60859 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60860 +
60861 +#ifdef CONFIG_PAX_SOFTMODE
60862 +extern int pax_softmode;
60863 +#endif
60864 +
60865 +extern int pax_check_flags(unsigned long *);
60866 +
60867 +/* if tsk != current then task_lock must be held on it */
60868 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60869 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
60870 +{
60871 + if (likely(tsk->mm))
60872 + return tsk->mm->pax_flags;
60873 + else
60874 + return 0UL;
60875 +}
60876 +
60877 +/* if tsk != current then task_lock must be held on it */
60878 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60879 +{
60880 + if (likely(tsk->mm)) {
60881 + tsk->mm->pax_flags = flags;
60882 + return 0;
60883 + }
60884 + return -EINVAL;
60885 +}
60886 +#endif
60887 +
60888 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60889 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
60890 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60891 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60892 +#endif
60893 +
60894 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60895 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60896 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
60897 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60898 +
60899 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60900 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60901
60902 @@ -2081,7 +2173,9 @@ void yield(void);
60903 extern struct exec_domain default_exec_domain;
60904
60905 union thread_union {
60906 +#ifndef CONFIG_X86
60907 struct thread_info thread_info;
60908 +#endif
60909 unsigned long stack[THREAD_SIZE/sizeof(long)];
60910 };
60911
60912 @@ -2114,6 +2208,7 @@ extern struct pid_namespace init_pid_ns;
60913 */
60914
60915 extern struct task_struct *find_task_by_vpid(pid_t nr);
60916 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60917 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60918 struct pid_namespace *ns);
60919
60920 @@ -2235,6 +2330,12 @@ static inline void mmdrop(struct mm_struct * mm)
60921 extern void mmput(struct mm_struct *);
60922 /* Grab a reference to a task's mm, if it is not already going away */
60923 extern struct mm_struct *get_task_mm(struct task_struct *task);
60924 +/*
60925 + * Grab a reference to a task's mm, if it is not already going away
60926 + * and ptrace_may_access with the mode parameter passed to it
60927 + * succeeds.
60928 + */
60929 +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
60930 /* Remove the current tasks stale references to the old mm_struct */
60931 extern void mm_release(struct task_struct *, struct mm_struct *);
60932 /* Allocate a new mm structure and copy contents from tsk->mm */
60933 @@ -2251,7 +2352,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
60934 extern void exit_itimers(struct signal_struct *);
60935 extern void flush_itimer_signals(void);
60936
60937 -extern NORET_TYPE void do_group_exit(int);
60938 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60939
60940 extern void daemonize(const char *, ...);
60941 extern int allow_signal(int);
60942 @@ -2416,13 +2517,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
60943
60944 #endif
60945
60946 -static inline int object_is_on_stack(void *obj)
60947 +static inline int object_starts_on_stack(void *obj)
60948 {
60949 - void *stack = task_stack_page(current);
60950 + const void *stack = task_stack_page(current);
60951
60952 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60953 }
60954
60955 +#ifdef CONFIG_PAX_USERCOPY
60956 +extern int object_is_on_stack(const void *obj, unsigned long len);
60957 +#endif
60958 +
60959 extern void thread_info_cache_init(void);
60960
60961 #ifdef CONFIG_DEBUG_STACK_USAGE
60962 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
60963 index 899fbb4..1cb4138 100644
60964 --- a/include/linux/screen_info.h
60965 +++ b/include/linux/screen_info.h
60966 @@ -43,7 +43,8 @@ struct screen_info {
60967 __u16 pages; /* 0x32 */
60968 __u16 vesa_attributes; /* 0x34 */
60969 __u32 capabilities; /* 0x36 */
60970 - __u8 _reserved[6]; /* 0x3a */
60971 + __u16 vesapm_size; /* 0x3a */
60972 + __u8 _reserved[4]; /* 0x3c */
60973 } __attribute__((packed));
60974
60975 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60976 diff --git a/include/linux/security.h b/include/linux/security.h
60977 index e8c619d..e0cbd1c 100644
60978 --- a/include/linux/security.h
60979 +++ b/include/linux/security.h
60980 @@ -37,6 +37,7 @@
60981 #include <linux/xfrm.h>
60982 #include <linux/slab.h>
60983 #include <linux/xattr.h>
60984 +#include <linux/grsecurity.h>
60985 #include <net/flow.h>
60986
60987 /* Maximum number of letters for an LSM name string */
60988 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
60989 index 0b69a46..b2ffa4c 100644
60990 --- a/include/linux/seq_file.h
60991 +++ b/include/linux/seq_file.h
60992 @@ -24,6 +24,9 @@ struct seq_file {
60993 struct mutex lock;
60994 const struct seq_operations *op;
60995 int poll_event;
60996 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60997 + u64 exec_id;
60998 +#endif
60999 void *private;
61000 };
61001
61002 @@ -33,6 +36,7 @@ struct seq_operations {
61003 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
61004 int (*show) (struct seq_file *m, void *v);
61005 };
61006 +typedef struct seq_operations __no_const seq_operations_no_const;
61007
61008 #define SEQ_SKIP 1
61009
61010 diff --git a/include/linux/shm.h b/include/linux/shm.h
61011 index 92808b8..c28cac4 100644
61012 --- a/include/linux/shm.h
61013 +++ b/include/linux/shm.h
61014 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
61015
61016 /* The task created the shm object. NULL if the task is dead. */
61017 struct task_struct *shm_creator;
61018 +#ifdef CONFIG_GRKERNSEC
61019 + time_t shm_createtime;
61020 + pid_t shm_lapid;
61021 +#endif
61022 };
61023
61024 /* shm_mode upper byte flags */
61025 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
61026 index fe86488..1563c1c 100644
61027 --- a/include/linux/skbuff.h
61028 +++ b/include/linux/skbuff.h
61029 @@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
61030 */
61031 static inline int skb_queue_empty(const struct sk_buff_head *list)
61032 {
61033 - return list->next == (struct sk_buff *)list;
61034 + return list->next == (const struct sk_buff *)list;
61035 }
61036
61037 /**
61038 @@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
61039 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
61040 const struct sk_buff *skb)
61041 {
61042 - return skb->next == (struct sk_buff *)list;
61043 + return skb->next == (const struct sk_buff *)list;
61044 }
61045
61046 /**
61047 @@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
61048 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
61049 const struct sk_buff *skb)
61050 {
61051 - return skb->prev == (struct sk_buff *)list;
61052 + return skb->prev == (const struct sk_buff *)list;
61053 }
61054
61055 /**
61056 @@ -1523,7 +1523,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
61057 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
61058 */
61059 #ifndef NET_SKB_PAD
61060 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
61061 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
61062 #endif
61063
61064 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
61065 diff --git a/include/linux/slab.h b/include/linux/slab.h
61066 index 573c809..e84c132 100644
61067 --- a/include/linux/slab.h
61068 +++ b/include/linux/slab.h
61069 @@ -11,12 +11,20 @@
61070
61071 #include <linux/gfp.h>
61072 #include <linux/types.h>
61073 +#include <linux/err.h>
61074
61075 /*
61076 * Flags to pass to kmem_cache_create().
61077 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
61078 */
61079 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
61080 +
61081 +#ifdef CONFIG_PAX_USERCOPY
61082 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
61083 +#else
61084 +#define SLAB_USERCOPY 0x00000000UL
61085 +#endif
61086 +
61087 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
61088 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
61089 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
61090 @@ -87,10 +95,13 @@
61091 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
61092 * Both make kfree a no-op.
61093 */
61094 -#define ZERO_SIZE_PTR ((void *)16)
61095 +#define ZERO_SIZE_PTR \
61096 +({ \
61097 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
61098 + (void *)(-MAX_ERRNO-1L); \
61099 +})
61100
61101 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
61102 - (unsigned long)ZERO_SIZE_PTR)
61103 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
61104
61105 /*
61106 * struct kmem_cache related prototypes
61107 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
61108 void kfree(const void *);
61109 void kzfree(const void *);
61110 size_t ksize(const void *);
61111 +void check_object_size(const void *ptr, unsigned long n, bool to);
61112
61113 /*
61114 * Allocator specific definitions. These are mainly used to establish optimized
61115 @@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
61116
61117 void __init kmem_cache_init_late(void);
61118
61119 +#define kmalloc(x, y) \
61120 +({ \
61121 + void *___retval; \
61122 + intoverflow_t ___x = (intoverflow_t)x; \
61123 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
61124 + ___retval = NULL; \
61125 + else \
61126 + ___retval = kmalloc((size_t)___x, (y)); \
61127 + ___retval; \
61128 +})
61129 +
61130 +#define kmalloc_node(x, y, z) \
61131 +({ \
61132 + void *___retval; \
61133 + intoverflow_t ___x = (intoverflow_t)x; \
61134 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
61135 + ___retval = NULL; \
61136 + else \
61137 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
61138 + ___retval; \
61139 +})
61140 +
61141 +#define kzalloc(x, y) \
61142 +({ \
61143 + void *___retval; \
61144 + intoverflow_t ___x = (intoverflow_t)x; \
61145 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
61146 + ___retval = NULL; \
61147 + else \
61148 + ___retval = kzalloc((size_t)___x, (y)); \
61149 + ___retval; \
61150 +})
61151 +
61152 +#define __krealloc(x, y, z) \
61153 +({ \
61154 + void *___retval; \
61155 + intoverflow_t ___y = (intoverflow_t)y; \
61156 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
61157 + ___retval = NULL; \
61158 + else \
61159 + ___retval = __krealloc((x), (size_t)___y, (z)); \
61160 + ___retval; \
61161 +})
61162 +
61163 +#define krealloc(x, y, z) \
61164 +({ \
61165 + void *___retval; \
61166 + intoverflow_t ___y = (intoverflow_t)y; \
61167 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
61168 + ___retval = NULL; \
61169 + else \
61170 + ___retval = krealloc((x), (size_t)___y, (z)); \
61171 + ___retval; \
61172 +})
61173 +
61174 #endif /* _LINUX_SLAB_H */
61175 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
61176 index d00e0ba..1b3bf7b 100644
61177 --- a/include/linux/slab_def.h
61178 +++ b/include/linux/slab_def.h
61179 @@ -68,10 +68,10 @@ struct kmem_cache {
61180 unsigned long node_allocs;
61181 unsigned long node_frees;
61182 unsigned long node_overflow;
61183 - atomic_t allochit;
61184 - atomic_t allocmiss;
61185 - atomic_t freehit;
61186 - atomic_t freemiss;
61187 + atomic_unchecked_t allochit;
61188 + atomic_unchecked_t allocmiss;
61189 + atomic_unchecked_t freehit;
61190 + atomic_unchecked_t freemiss;
61191
61192 /*
61193 * If debugging is enabled, then the allocator can add additional
61194 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
61195 index a32bcfd..53b71f4 100644
61196 --- a/include/linux/slub_def.h
61197 +++ b/include/linux/slub_def.h
61198 @@ -89,7 +89,7 @@ struct kmem_cache {
61199 struct kmem_cache_order_objects max;
61200 struct kmem_cache_order_objects min;
61201 gfp_t allocflags; /* gfp flags to use on each alloc */
61202 - int refcount; /* Refcount for slab cache destroy */
61203 + atomic_t refcount; /* Refcount for slab cache destroy */
61204 void (*ctor)(void *);
61205 int inuse; /* Offset to metadata */
61206 int align; /* Alignment */
61207 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
61208 }
61209
61210 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
61211 -void *__kmalloc(size_t size, gfp_t flags);
61212 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
61213
61214 static __always_inline void *
61215 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
61216 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
61217 index de8832d..0147b46 100644
61218 --- a/include/linux/sonet.h
61219 +++ b/include/linux/sonet.h
61220 @@ -61,7 +61,7 @@ struct sonet_stats {
61221 #include <linux/atomic.h>
61222
61223 struct k_sonet_stats {
61224 -#define __HANDLE_ITEM(i) atomic_t i
61225 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
61226 __SONET_ITEMS
61227 #undef __HANDLE_ITEM
61228 };
61229 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
61230 index 3d8f9c4..69f1c0a 100644
61231 --- a/include/linux/sunrpc/clnt.h
61232 +++ b/include/linux/sunrpc/clnt.h
61233 @@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
61234 {
61235 switch (sap->sa_family) {
61236 case AF_INET:
61237 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
61238 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
61239 case AF_INET6:
61240 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
61241 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
61242 }
61243 return 0;
61244 }
61245 @@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
61246 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
61247 const struct sockaddr *src)
61248 {
61249 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
61250 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
61251 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
61252
61253 dsin->sin_family = ssin->sin_family;
61254 @@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
61255 if (sa->sa_family != AF_INET6)
61256 return 0;
61257
61258 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
61259 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
61260 }
61261
61262 #endif /* __KERNEL__ */
61263 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
61264 index e775689..9e206d9 100644
61265 --- a/include/linux/sunrpc/sched.h
61266 +++ b/include/linux/sunrpc/sched.h
61267 @@ -105,6 +105,7 @@ struct rpc_call_ops {
61268 void (*rpc_call_done)(struct rpc_task *, void *);
61269 void (*rpc_release)(void *);
61270 };
61271 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
61272
61273 struct rpc_task_setup {
61274 struct rpc_task *task;
61275 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
61276 index c14fe86..393245e 100644
61277 --- a/include/linux/sunrpc/svc_rdma.h
61278 +++ b/include/linux/sunrpc/svc_rdma.h
61279 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
61280 extern unsigned int svcrdma_max_requests;
61281 extern unsigned int svcrdma_max_req_size;
61282
61283 -extern atomic_t rdma_stat_recv;
61284 -extern atomic_t rdma_stat_read;
61285 -extern atomic_t rdma_stat_write;
61286 -extern atomic_t rdma_stat_sq_starve;
61287 -extern atomic_t rdma_stat_rq_starve;
61288 -extern atomic_t rdma_stat_rq_poll;
61289 -extern atomic_t rdma_stat_rq_prod;
61290 -extern atomic_t rdma_stat_sq_poll;
61291 -extern atomic_t rdma_stat_sq_prod;
61292 +extern atomic_unchecked_t rdma_stat_recv;
61293 +extern atomic_unchecked_t rdma_stat_read;
61294 +extern atomic_unchecked_t rdma_stat_write;
61295 +extern atomic_unchecked_t rdma_stat_sq_starve;
61296 +extern atomic_unchecked_t rdma_stat_rq_starve;
61297 +extern atomic_unchecked_t rdma_stat_rq_poll;
61298 +extern atomic_unchecked_t rdma_stat_rq_prod;
61299 +extern atomic_unchecked_t rdma_stat_sq_poll;
61300 +extern atomic_unchecked_t rdma_stat_sq_prod;
61301
61302 #define RPCRDMA_VERSION 1
61303
61304 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
61305 index 703cfa33..0b8ca72ac 100644
61306 --- a/include/linux/sysctl.h
61307 +++ b/include/linux/sysctl.h
61308 @@ -155,7 +155,11 @@ enum
61309 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
61310 };
61311
61312 -
61313 +#ifdef CONFIG_PAX_SOFTMODE
61314 +enum {
61315 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
61316 +};
61317 +#endif
61318
61319 /* CTL_VM names: */
61320 enum
61321 @@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
61322
61323 extern int proc_dostring(struct ctl_table *, int,
61324 void __user *, size_t *, loff_t *);
61325 +extern int proc_dostring_modpriv(struct ctl_table *, int,
61326 + void __user *, size_t *, loff_t *);
61327 extern int proc_dointvec(struct ctl_table *, int,
61328 void __user *, size_t *, loff_t *);
61329 extern int proc_dointvec_minmax(struct ctl_table *, int,
61330 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
61331 index a71a292..51bd91d 100644
61332 --- a/include/linux/tracehook.h
61333 +++ b/include/linux/tracehook.h
61334 @@ -54,12 +54,12 @@ struct linux_binprm;
61335 /*
61336 * ptrace report for syscall entry and exit looks identical.
61337 */
61338 -static inline void ptrace_report_syscall(struct pt_regs *regs)
61339 +static inline int ptrace_report_syscall(struct pt_regs *regs)
61340 {
61341 int ptrace = current->ptrace;
61342
61343 if (!(ptrace & PT_PTRACED))
61344 - return;
61345 + return 0;
61346
61347 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
61348
61349 @@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
61350 send_sig(current->exit_code, current, 1);
61351 current->exit_code = 0;
61352 }
61353 +
61354 + return fatal_signal_pending(current);
61355 }
61356
61357 /**
61358 @@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
61359 static inline __must_check int tracehook_report_syscall_entry(
61360 struct pt_regs *regs)
61361 {
61362 - ptrace_report_syscall(regs);
61363 - return 0;
61364 + return ptrace_report_syscall(regs);
61365 }
61366
61367 /**
61368 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
61369 index ff7dc08..893e1bd 100644
61370 --- a/include/linux/tty_ldisc.h
61371 +++ b/include/linux/tty_ldisc.h
61372 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
61373
61374 struct module *owner;
61375
61376 - int refcount;
61377 + atomic_t refcount;
61378 };
61379
61380 struct tty_ldisc {
61381 diff --git a/include/linux/types.h b/include/linux/types.h
61382 index 57a9723..dbe234a 100644
61383 --- a/include/linux/types.h
61384 +++ b/include/linux/types.h
61385 @@ -213,10 +213,26 @@ typedef struct {
61386 int counter;
61387 } atomic_t;
61388
61389 +#ifdef CONFIG_PAX_REFCOUNT
61390 +typedef struct {
61391 + int counter;
61392 +} atomic_unchecked_t;
61393 +#else
61394 +typedef atomic_t atomic_unchecked_t;
61395 +#endif
61396 +
61397 #ifdef CONFIG_64BIT
61398 typedef struct {
61399 long counter;
61400 } atomic64_t;
61401 +
61402 +#ifdef CONFIG_PAX_REFCOUNT
61403 +typedef struct {
61404 + long counter;
61405 +} atomic64_unchecked_t;
61406 +#else
61407 +typedef atomic64_t atomic64_unchecked_t;
61408 +#endif
61409 #endif
61410
61411 struct list_head {
61412 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
61413 index 5ca0951..ab496a5 100644
61414 --- a/include/linux/uaccess.h
61415 +++ b/include/linux/uaccess.h
61416 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
61417 long ret; \
61418 mm_segment_t old_fs = get_fs(); \
61419 \
61420 - set_fs(KERNEL_DS); \
61421 pagefault_disable(); \
61422 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
61423 - pagefault_enable(); \
61424 + set_fs(KERNEL_DS); \
61425 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
61426 set_fs(old_fs); \
61427 + pagefault_enable(); \
61428 ret; \
61429 })
61430
61431 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
61432 index 99c1b4d..bb94261 100644
61433 --- a/include/linux/unaligned/access_ok.h
61434 +++ b/include/linux/unaligned/access_ok.h
61435 @@ -6,32 +6,32 @@
61436
61437 static inline u16 get_unaligned_le16(const void *p)
61438 {
61439 - return le16_to_cpup((__le16 *)p);
61440 + return le16_to_cpup((const __le16 *)p);
61441 }
61442
61443 static inline u32 get_unaligned_le32(const void *p)
61444 {
61445 - return le32_to_cpup((__le32 *)p);
61446 + return le32_to_cpup((const __le32 *)p);
61447 }
61448
61449 static inline u64 get_unaligned_le64(const void *p)
61450 {
61451 - return le64_to_cpup((__le64 *)p);
61452 + return le64_to_cpup((const __le64 *)p);
61453 }
61454
61455 static inline u16 get_unaligned_be16(const void *p)
61456 {
61457 - return be16_to_cpup((__be16 *)p);
61458 + return be16_to_cpup((const __be16 *)p);
61459 }
61460
61461 static inline u32 get_unaligned_be32(const void *p)
61462 {
61463 - return be32_to_cpup((__be32 *)p);
61464 + return be32_to_cpup((const __be32 *)p);
61465 }
61466
61467 static inline u64 get_unaligned_be64(const void *p)
61468 {
61469 - return be64_to_cpup((__be64 *)p);
61470 + return be64_to_cpup((const __be64 *)p);
61471 }
61472
61473 static inline void put_unaligned_le16(u16 val, void *p)
61474 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
61475 index e5a40c3..20ab0f6 100644
61476 --- a/include/linux/usb/renesas_usbhs.h
61477 +++ b/include/linux/usb/renesas_usbhs.h
61478 @@ -39,7 +39,7 @@ enum {
61479 */
61480 struct renesas_usbhs_driver_callback {
61481 int (*notify_hotplug)(struct platform_device *pdev);
61482 -};
61483 +} __no_const;
61484
61485 /*
61486 * callback functions for platform
61487 @@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback {
61488 * VBUS control is needed for Host
61489 */
61490 int (*set_vbus)(struct platform_device *pdev, int enable);
61491 -};
61492 +} __no_const;
61493
61494 /*
61495 * parameters for renesas usbhs
61496 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
61497 index 6f8fbcf..8259001 100644
61498 --- a/include/linux/vermagic.h
61499 +++ b/include/linux/vermagic.h
61500 @@ -25,9 +25,35 @@
61501 #define MODULE_ARCH_VERMAGIC ""
61502 #endif
61503
61504 +#ifdef CONFIG_PAX_REFCOUNT
61505 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
61506 +#else
61507 +#define MODULE_PAX_REFCOUNT ""
61508 +#endif
61509 +
61510 +#ifdef CONSTIFY_PLUGIN
61511 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
61512 +#else
61513 +#define MODULE_CONSTIFY_PLUGIN ""
61514 +#endif
61515 +
61516 +#ifdef STACKLEAK_PLUGIN
61517 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
61518 +#else
61519 +#define MODULE_STACKLEAK_PLUGIN ""
61520 +#endif
61521 +
61522 +#ifdef CONFIG_GRKERNSEC
61523 +#define MODULE_GRSEC "GRSEC "
61524 +#else
61525 +#define MODULE_GRSEC ""
61526 +#endif
61527 +
61528 #define VERMAGIC_STRING \
61529 UTS_RELEASE " " \
61530 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
61531 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
61532 - MODULE_ARCH_VERMAGIC
61533 + MODULE_ARCH_VERMAGIC \
61534 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
61535 + MODULE_GRSEC
61536
61537 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
61538 index 4bde182..aec92c1 100644
61539 --- a/include/linux/vmalloc.h
61540 +++ b/include/linux/vmalloc.h
61541 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
61542 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61543 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61544 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
61545 +
61546 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61547 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
61548 +#endif
61549 +
61550 /* bits [20..32] reserved for arch specific ioremap internals */
61551
61552 /*
61553 @@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
61554 # endif
61555 #endif
61556
61557 +#define vmalloc(x) \
61558 +({ \
61559 + void *___retval; \
61560 + intoverflow_t ___x = (intoverflow_t)x; \
61561 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61562 + ___retval = NULL; \
61563 + else \
61564 + ___retval = vmalloc((unsigned long)___x); \
61565 + ___retval; \
61566 +})
61567 +
61568 +#define vzalloc(x) \
61569 +({ \
61570 + void *___retval; \
61571 + intoverflow_t ___x = (intoverflow_t)x; \
61572 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
61573 + ___retval = NULL; \
61574 + else \
61575 + ___retval = vzalloc((unsigned long)___x); \
61576 + ___retval; \
61577 +})
61578 +
61579 +#define __vmalloc(x, y, z) \
61580 +({ \
61581 + void *___retval; \
61582 + intoverflow_t ___x = (intoverflow_t)x; \
61583 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61584 + ___retval = NULL; \
61585 + else \
61586 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61587 + ___retval; \
61588 +})
61589 +
61590 +#define vmalloc_user(x) \
61591 +({ \
61592 + void *___retval; \
61593 + intoverflow_t ___x = (intoverflow_t)x; \
61594 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61595 + ___retval = NULL; \
61596 + else \
61597 + ___retval = vmalloc_user((unsigned long)___x); \
61598 + ___retval; \
61599 +})
61600 +
61601 +#define vmalloc_exec(x) \
61602 +({ \
61603 + void *___retval; \
61604 + intoverflow_t ___x = (intoverflow_t)x; \
61605 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61606 + ___retval = NULL; \
61607 + else \
61608 + ___retval = vmalloc_exec((unsigned long)___x); \
61609 + ___retval; \
61610 +})
61611 +
61612 +#define vmalloc_node(x, y) \
61613 +({ \
61614 + void *___retval; \
61615 + intoverflow_t ___x = (intoverflow_t)x; \
61616 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61617 + ___retval = NULL; \
61618 + else \
61619 + ___retval = vmalloc_node((unsigned long)___x, (y));\
61620 + ___retval; \
61621 +})
61622 +
61623 +#define vzalloc_node(x, y) \
61624 +({ \
61625 + void *___retval; \
61626 + intoverflow_t ___x = (intoverflow_t)x; \
61627 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
61628 + ___retval = NULL; \
61629 + else \
61630 + ___retval = vzalloc_node((unsigned long)___x, (y));\
61631 + ___retval; \
61632 +})
61633 +
61634 +#define vmalloc_32(x) \
61635 +({ \
61636 + void *___retval; \
61637 + intoverflow_t ___x = (intoverflow_t)x; \
61638 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61639 + ___retval = NULL; \
61640 + else \
61641 + ___retval = vmalloc_32((unsigned long)___x); \
61642 + ___retval; \
61643 +})
61644 +
61645 +#define vmalloc_32_user(x) \
61646 +({ \
61647 +void *___retval; \
61648 + intoverflow_t ___x = (intoverflow_t)x; \
61649 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61650 + ___retval = NULL; \
61651 + else \
61652 + ___retval = vmalloc_32_user((unsigned long)___x);\
61653 + ___retval; \
61654 +})
61655 +
61656 #endif /* _LINUX_VMALLOC_H */
61657 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
61658 index 65efb92..137adbb 100644
61659 --- a/include/linux/vmstat.h
61660 +++ b/include/linux/vmstat.h
61661 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
61662 /*
61663 * Zone based page accounting with per cpu differentials.
61664 */
61665 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61666 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61667
61668 static inline void zone_page_state_add(long x, struct zone *zone,
61669 enum zone_stat_item item)
61670 {
61671 - atomic_long_add(x, &zone->vm_stat[item]);
61672 - atomic_long_add(x, &vm_stat[item]);
61673 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61674 + atomic_long_add_unchecked(x, &vm_stat[item]);
61675 }
61676
61677 static inline unsigned long global_page_state(enum zone_stat_item item)
61678 {
61679 - long x = atomic_long_read(&vm_stat[item]);
61680 + long x = atomic_long_read_unchecked(&vm_stat[item]);
61681 #ifdef CONFIG_SMP
61682 if (x < 0)
61683 x = 0;
61684 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
61685 static inline unsigned long zone_page_state(struct zone *zone,
61686 enum zone_stat_item item)
61687 {
61688 - long x = atomic_long_read(&zone->vm_stat[item]);
61689 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61690 #ifdef CONFIG_SMP
61691 if (x < 0)
61692 x = 0;
61693 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
61694 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61695 enum zone_stat_item item)
61696 {
61697 - long x = atomic_long_read(&zone->vm_stat[item]);
61698 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61699
61700 #ifdef CONFIG_SMP
61701 int cpu;
61702 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
61703
61704 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61705 {
61706 - atomic_long_inc(&zone->vm_stat[item]);
61707 - atomic_long_inc(&vm_stat[item]);
61708 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
61709 + atomic_long_inc_unchecked(&vm_stat[item]);
61710 }
61711
61712 static inline void __inc_zone_page_state(struct page *page,
61713 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
61714
61715 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61716 {
61717 - atomic_long_dec(&zone->vm_stat[item]);
61718 - atomic_long_dec(&vm_stat[item]);
61719 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
61720 + atomic_long_dec_unchecked(&vm_stat[item]);
61721 }
61722
61723 static inline void __dec_zone_page_state(struct page *page,
61724 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
61725 index e5d1220..ef6e406 100644
61726 --- a/include/linux/xattr.h
61727 +++ b/include/linux/xattr.h
61728 @@ -57,6 +57,11 @@
61729 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
61730 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
61731
61732 +/* User namespace */
61733 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
61734 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
61735 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
61736 +
61737 #ifdef __KERNEL__
61738
61739 #include <linux/types.h>
61740 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
61741 index 4aeff96..b378cdc 100644
61742 --- a/include/media/saa7146_vv.h
61743 +++ b/include/media/saa7146_vv.h
61744 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
61745 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61746
61747 /* the extension can override this */
61748 - struct v4l2_ioctl_ops ops;
61749 + v4l2_ioctl_ops_no_const ops;
61750 /* pointer to the saa7146 core ops */
61751 const struct v4l2_ioctl_ops *core_ops;
61752
61753 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
61754 index c7c40f1..4f01585 100644
61755 --- a/include/media/v4l2-dev.h
61756 +++ b/include/media/v4l2-dev.h
61757 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
61758
61759
61760 struct v4l2_file_operations {
61761 - struct module *owner;
61762 + struct module * const owner;
61763 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61764 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61765 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61766 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
61767 int (*open) (struct file *);
61768 int (*release) (struct file *);
61769 };
61770 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61771
61772 /*
61773 * Newer version of video_device, handled by videodev2.c
61774 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
61775 index 4d1c74a..65e1221 100644
61776 --- a/include/media/v4l2-ioctl.h
61777 +++ b/include/media/v4l2-ioctl.h
61778 @@ -274,7 +274,7 @@ struct v4l2_ioctl_ops {
61779 long (*vidioc_default) (struct file *file, void *fh,
61780 bool valid_prio, int cmd, void *arg);
61781 };
61782 -
61783 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61784
61785 /* v4l debugging and diagnostics */
61786
61787 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
61788 index 8d55251..dfe5b0a 100644
61789 --- a/include/net/caif/caif_hsi.h
61790 +++ b/include/net/caif/caif_hsi.h
61791 @@ -98,7 +98,7 @@ struct cfhsi_drv {
61792 void (*rx_done_cb) (struct cfhsi_drv *drv);
61793 void (*wake_up_cb) (struct cfhsi_drv *drv);
61794 void (*wake_down_cb) (struct cfhsi_drv *drv);
61795 -};
61796 +} __no_const;
61797
61798 /* Structure implemented by HSI device. */
61799 struct cfhsi_dev {
61800 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
61801 index 9e5425b..8136ffc 100644
61802 --- a/include/net/caif/cfctrl.h
61803 +++ b/include/net/caif/cfctrl.h
61804 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
61805 void (*radioset_rsp)(void);
61806 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61807 struct cflayer *client_layer);
61808 -};
61809 +} __no_const;
61810
61811 /* Link Setup Parameters for CAIF-Links. */
61812 struct cfctrl_link_param {
61813 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
61814 struct cfctrl {
61815 struct cfsrvl serv;
61816 struct cfctrl_rsp res;
61817 - atomic_t req_seq_no;
61818 - atomic_t rsp_seq_no;
61819 + atomic_unchecked_t req_seq_no;
61820 + atomic_unchecked_t rsp_seq_no;
61821 struct list_head list;
61822 /* Protects from simultaneous access to first_req list */
61823 spinlock_t info_list_lock;
61824 diff --git a/include/net/flow.h b/include/net/flow.h
61825 index 2a7eefd..3250f3b 100644
61826 --- a/include/net/flow.h
61827 +++ b/include/net/flow.h
61828 @@ -218,6 +218,6 @@ extern struct flow_cache_object *flow_cache_lookup(
61829
61830 extern void flow_cache_flush(void);
61831 extern void flow_cache_flush_deferred(void);
61832 -extern atomic_t flow_cache_genid;
61833 +extern atomic_unchecked_t flow_cache_genid;
61834
61835 #endif
61836 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
61837 index e9ff3fc..9d3e5c7 100644
61838 --- a/include/net/inetpeer.h
61839 +++ b/include/net/inetpeer.h
61840 @@ -48,8 +48,8 @@ struct inet_peer {
61841 */
61842 union {
61843 struct {
61844 - atomic_t rid; /* Frag reception counter */
61845 - atomic_t ip_id_count; /* IP ID for the next packet */
61846 + atomic_unchecked_t rid; /* Frag reception counter */
61847 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61848 __u32 tcp_ts;
61849 __u32 tcp_ts_stamp;
61850 };
61851 @@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
61852 more++;
61853 inet_peer_refcheck(p);
61854 do {
61855 - old = atomic_read(&p->ip_id_count);
61856 + old = atomic_read_unchecked(&p->ip_id_count);
61857 new = old + more;
61858 if (!new)
61859 new = 1;
61860 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61861 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61862 return new;
61863 }
61864
61865 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
61866 index 10422ef..662570f 100644
61867 --- a/include/net/ip_fib.h
61868 +++ b/include/net/ip_fib.h
61869 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
61870
61871 #define FIB_RES_SADDR(net, res) \
61872 ((FIB_RES_NH(res).nh_saddr_genid == \
61873 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61874 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61875 FIB_RES_NH(res).nh_saddr : \
61876 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61877 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61878 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
61879 index e5a7b9a..f4fc44b 100644
61880 --- a/include/net/ip_vs.h
61881 +++ b/include/net/ip_vs.h
61882 @@ -509,7 +509,7 @@ struct ip_vs_conn {
61883 struct ip_vs_conn *control; /* Master control connection */
61884 atomic_t n_control; /* Number of controlled ones */
61885 struct ip_vs_dest *dest; /* real server */
61886 - atomic_t in_pkts; /* incoming packet counter */
61887 + atomic_unchecked_t in_pkts; /* incoming packet counter */
61888
61889 /* packet transmitter for different forwarding methods. If it
61890 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61891 @@ -647,7 +647,7 @@ struct ip_vs_dest {
61892 __be16 port; /* port number of the server */
61893 union nf_inet_addr addr; /* IP address of the server */
61894 volatile unsigned flags; /* dest status flags */
61895 - atomic_t conn_flags; /* flags to copy to conn */
61896 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
61897 atomic_t weight; /* server weight */
61898
61899 atomic_t refcnt; /* reference counter */
61900 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
61901 index 69b610a..fe3962c 100644
61902 --- a/include/net/irda/ircomm_core.h
61903 +++ b/include/net/irda/ircomm_core.h
61904 @@ -51,7 +51,7 @@ typedef struct {
61905 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61906 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61907 struct ircomm_info *);
61908 -} call_t;
61909 +} __no_const call_t;
61910
61911 struct ircomm_cb {
61912 irda_queue_t queue;
61913 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
61914 index 59ba38bc..d515662 100644
61915 --- a/include/net/irda/ircomm_tty.h
61916 +++ b/include/net/irda/ircomm_tty.h
61917 @@ -35,6 +35,7 @@
61918 #include <linux/termios.h>
61919 #include <linux/timer.h>
61920 #include <linux/tty.h> /* struct tty_struct */
61921 +#include <asm/local.h>
61922
61923 #include <net/irda/irias_object.h>
61924 #include <net/irda/ircomm_core.h>
61925 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61926 unsigned short close_delay;
61927 unsigned short closing_wait; /* time to wait before closing */
61928
61929 - int open_count;
61930 - int blocked_open; /* # of blocked opens */
61931 + local_t open_count;
61932 + local_t blocked_open; /* # of blocked opens */
61933
61934 /* Protect concurent access to :
61935 * o self->open_count
61936 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
61937 index f2419cf..473679f 100644
61938 --- a/include/net/iucv/af_iucv.h
61939 +++ b/include/net/iucv/af_iucv.h
61940 @@ -139,7 +139,7 @@ struct iucv_sock {
61941 struct iucv_sock_list {
61942 struct hlist_head head;
61943 rwlock_t lock;
61944 - atomic_t autobind_name;
61945 + atomic_unchecked_t autobind_name;
61946 };
61947
61948 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61949 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
61950 index 2720884..3aa5c25 100644
61951 --- a/include/net/neighbour.h
61952 +++ b/include/net/neighbour.h
61953 @@ -122,7 +122,7 @@ struct neigh_ops {
61954 void (*error_report)(struct neighbour *, struct sk_buff *);
61955 int (*output)(struct neighbour *, struct sk_buff *);
61956 int (*connected_output)(struct neighbour *, struct sk_buff *);
61957 -};
61958 +} __do_const;
61959
61960 struct pneigh_entry {
61961 struct pneigh_entry *next;
61962 diff --git a/include/net/netlink.h b/include/net/netlink.h
61963 index cb1f350..3279d2c 100644
61964 --- a/include/net/netlink.h
61965 +++ b/include/net/netlink.h
61966 @@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
61967 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61968 {
61969 if (mark)
61970 - skb_trim(skb, (unsigned char *) mark - skb->data);
61971 + skb_trim(skb, (const unsigned char *) mark - skb->data);
61972 }
61973
61974 /**
61975 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
61976 index d786b4f..4c3dd41 100644
61977 --- a/include/net/netns/ipv4.h
61978 +++ b/include/net/netns/ipv4.h
61979 @@ -56,8 +56,8 @@ struct netns_ipv4 {
61980
61981 unsigned int sysctl_ping_group_range[2];
61982
61983 - atomic_t rt_genid;
61984 - atomic_t dev_addr_genid;
61985 + atomic_unchecked_t rt_genid;
61986 + atomic_unchecked_t dev_addr_genid;
61987
61988 #ifdef CONFIG_IP_MROUTE
61989 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61990 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
61991 index 6a72a58..e6a127d 100644
61992 --- a/include/net/sctp/sctp.h
61993 +++ b/include/net/sctp/sctp.h
61994 @@ -318,9 +318,9 @@ do { \
61995
61996 #else /* SCTP_DEBUG */
61997
61998 -#define SCTP_DEBUG_PRINTK(whatever...)
61999 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
62000 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
62001 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
62002 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
62003 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
62004 #define SCTP_ENABLE_DEBUG
62005 #define SCTP_DISABLE_DEBUG
62006 #define SCTP_ASSERT(expr, str, func)
62007 diff --git a/include/net/sock.h b/include/net/sock.h
62008 index 32e3937..87a1dbc 100644
62009 --- a/include/net/sock.h
62010 +++ b/include/net/sock.h
62011 @@ -277,7 +277,7 @@ struct sock {
62012 #ifdef CONFIG_RPS
62013 __u32 sk_rxhash;
62014 #endif
62015 - atomic_t sk_drops;
62016 + atomic_unchecked_t sk_drops;
62017 int sk_rcvbuf;
62018
62019 struct sk_filter __rcu *sk_filter;
62020 @@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
62021 }
62022
62023 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
62024 - char __user *from, char *to,
62025 + char __user *from, unsigned char *to,
62026 int copy, int offset)
62027 {
62028 if (skb->ip_summed == CHECKSUM_NONE) {
62029 diff --git a/include/net/tcp.h b/include/net/tcp.h
62030 index bb18c4d..bb87972 100644
62031 --- a/include/net/tcp.h
62032 +++ b/include/net/tcp.h
62033 @@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo {
62034 char *name;
62035 sa_family_t family;
62036 const struct file_operations *seq_fops;
62037 - struct seq_operations seq_ops;
62038 + seq_operations_no_const seq_ops;
62039 };
62040
62041 struct tcp_iter_state {
62042 diff --git a/include/net/udp.h b/include/net/udp.h
62043 index 3b285f4..0219639 100644
62044 --- a/include/net/udp.h
62045 +++ b/include/net/udp.h
62046 @@ -237,7 +237,7 @@ struct udp_seq_afinfo {
62047 sa_family_t family;
62048 struct udp_table *udp_table;
62049 const struct file_operations *seq_fops;
62050 - struct seq_operations seq_ops;
62051 + seq_operations_no_const seq_ops;
62052 };
62053
62054 struct udp_iter_state {
62055 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
62056 index b203e14..1df3991 100644
62057 --- a/include/net/xfrm.h
62058 +++ b/include/net/xfrm.h
62059 @@ -505,7 +505,7 @@ struct xfrm_policy {
62060 struct timer_list timer;
62061
62062 struct flow_cache_object flo;
62063 - atomic_t genid;
62064 + atomic_unchecked_t genid;
62065 u32 priority;
62066 u32 index;
62067 struct xfrm_mark mark;
62068 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
62069 index 1a046b1..ee0bef0 100644
62070 --- a/include/rdma/iw_cm.h
62071 +++ b/include/rdma/iw_cm.h
62072 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
62073 int backlog);
62074
62075 int (*destroy_listen)(struct iw_cm_id *cm_id);
62076 -};
62077 +} __no_const;
62078
62079 /**
62080 * iw_create_cm_id - Create an IW CM identifier.
62081 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
62082 index 5d1a758..1dbf795 100644
62083 --- a/include/scsi/libfc.h
62084 +++ b/include/scsi/libfc.h
62085 @@ -748,6 +748,7 @@ struct libfc_function_template {
62086 */
62087 void (*disc_stop_final) (struct fc_lport *);
62088 };
62089 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
62090
62091 /**
62092 * struct fc_disc - Discovery context
62093 @@ -851,7 +852,7 @@ struct fc_lport {
62094 struct fc_vport *vport;
62095
62096 /* Operational Information */
62097 - struct libfc_function_template tt;
62098 + libfc_function_template_no_const tt;
62099 u8 link_up;
62100 u8 qfull;
62101 enum fc_lport_state state;
62102 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
62103 index 5591ed5..13eb457 100644
62104 --- a/include/scsi/scsi_device.h
62105 +++ b/include/scsi/scsi_device.h
62106 @@ -161,9 +161,9 @@ struct scsi_device {
62107 unsigned int max_device_blocked; /* what device_blocked counts down from */
62108 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
62109
62110 - atomic_t iorequest_cnt;
62111 - atomic_t iodone_cnt;
62112 - atomic_t ioerr_cnt;
62113 + atomic_unchecked_t iorequest_cnt;
62114 + atomic_unchecked_t iodone_cnt;
62115 + atomic_unchecked_t ioerr_cnt;
62116
62117 struct device sdev_gendev,
62118 sdev_dev;
62119 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
62120 index 2a65167..91e01f8 100644
62121 --- a/include/scsi/scsi_transport_fc.h
62122 +++ b/include/scsi/scsi_transport_fc.h
62123 @@ -711,7 +711,7 @@ struct fc_function_template {
62124 unsigned long show_host_system_hostname:1;
62125
62126 unsigned long disable_target_scan:1;
62127 -};
62128 +} __do_const;
62129
62130
62131 /**
62132 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
62133 index 030b87c..98a6954 100644
62134 --- a/include/sound/ak4xxx-adda.h
62135 +++ b/include/sound/ak4xxx-adda.h
62136 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
62137 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
62138 unsigned char val);
62139 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
62140 -};
62141 +} __no_const;
62142
62143 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
62144
62145 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
62146 index 8c05e47..2b5df97 100644
62147 --- a/include/sound/hwdep.h
62148 +++ b/include/sound/hwdep.h
62149 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
62150 struct snd_hwdep_dsp_status *status);
62151 int (*dsp_load)(struct snd_hwdep *hw,
62152 struct snd_hwdep_dsp_image *image);
62153 -};
62154 +} __no_const;
62155
62156 struct snd_hwdep {
62157 struct snd_card *card;
62158 diff --git a/include/sound/info.h b/include/sound/info.h
62159 index 5492cc4..1a65278 100644
62160 --- a/include/sound/info.h
62161 +++ b/include/sound/info.h
62162 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
62163 struct snd_info_buffer *buffer);
62164 void (*write)(struct snd_info_entry *entry,
62165 struct snd_info_buffer *buffer);
62166 -};
62167 +} __no_const;
62168
62169 struct snd_info_entry_ops {
62170 int (*open)(struct snd_info_entry *entry,
62171 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
62172 index 0cf91b2..b70cae4 100644
62173 --- a/include/sound/pcm.h
62174 +++ b/include/sound/pcm.h
62175 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
62176 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
62177 int (*ack)(struct snd_pcm_substream *substream);
62178 };
62179 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
62180
62181 /*
62182 *
62183 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
62184 index af1b49e..a5d55a5 100644
62185 --- a/include/sound/sb16_csp.h
62186 +++ b/include/sound/sb16_csp.h
62187 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
62188 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
62189 int (*csp_stop) (struct snd_sb_csp * p);
62190 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
62191 -};
62192 +} __no_const;
62193
62194 /*
62195 * CSP private data
62196 diff --git a/include/sound/soc.h b/include/sound/soc.h
62197 index 11cfb59..e3f93f4 100644
62198 --- a/include/sound/soc.h
62199 +++ b/include/sound/soc.h
62200 @@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
62201 /* platform IO - used for platform DAPM */
62202 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
62203 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
62204 -};
62205 +} __do_const;
62206
62207 struct snd_soc_platform {
62208 const char *name;
62209 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
62210 index 444cd6b..3327cc5 100644
62211 --- a/include/sound/ymfpci.h
62212 +++ b/include/sound/ymfpci.h
62213 @@ -358,7 +358,7 @@ struct snd_ymfpci {
62214 spinlock_t reg_lock;
62215 spinlock_t voice_lock;
62216 wait_queue_head_t interrupt_sleep;
62217 - atomic_t interrupt_sleep_count;
62218 + atomic_unchecked_t interrupt_sleep_count;
62219 struct snd_info_entry *proc_entry;
62220 const struct firmware *dsp_microcode;
62221 const struct firmware *controller_microcode;
62222 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
62223 index 94bbec3..3a8c6b0 100644
62224 --- a/include/target/target_core_base.h
62225 +++ b/include/target/target_core_base.h
62226 @@ -346,7 +346,7 @@ struct t10_reservation_ops {
62227 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
62228 int (*t10_pr_register)(struct se_cmd *);
62229 int (*t10_pr_clear)(struct se_cmd *);
62230 -};
62231 +} __no_const;
62232
62233 struct t10_reservation {
62234 /* Reservation effects all target ports */
62235 @@ -465,8 +465,8 @@ struct se_cmd {
62236 atomic_t t_se_count;
62237 atomic_t t_task_cdbs_left;
62238 atomic_t t_task_cdbs_ex_left;
62239 - atomic_t t_task_cdbs_sent;
62240 - atomic_t t_transport_aborted;
62241 + atomic_unchecked_t t_task_cdbs_sent;
62242 + atomic_unchecked_t t_transport_aborted;
62243 atomic_t t_transport_active;
62244 atomic_t t_transport_complete;
62245 atomic_t t_transport_queue_active;
62246 @@ -705,7 +705,7 @@ struct se_device {
62247 /* Active commands on this virtual SE device */
62248 atomic_t simple_cmds;
62249 atomic_t depth_left;
62250 - atomic_t dev_ordered_id;
62251 + atomic_unchecked_t dev_ordered_id;
62252 atomic_t execute_tasks;
62253 atomic_t dev_ordered_sync;
62254 atomic_t dev_qf_count;
62255 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
62256 index 1c09820..7f5ec79 100644
62257 --- a/include/trace/events/irq.h
62258 +++ b/include/trace/events/irq.h
62259 @@ -36,7 +36,7 @@ struct softirq_action;
62260 */
62261 TRACE_EVENT(irq_handler_entry,
62262
62263 - TP_PROTO(int irq, struct irqaction *action),
62264 + TP_PROTO(int irq, const struct irqaction *action),
62265
62266 TP_ARGS(irq, action),
62267
62268 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
62269 */
62270 TRACE_EVENT(irq_handler_exit,
62271
62272 - TP_PROTO(int irq, struct irqaction *action, int ret),
62273 + TP_PROTO(int irq, const struct irqaction *action, int ret),
62274
62275 TP_ARGS(irq, action, ret),
62276
62277 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
62278 index c41f308..6918de3 100644
62279 --- a/include/video/udlfb.h
62280 +++ b/include/video/udlfb.h
62281 @@ -52,10 +52,10 @@ struct dlfb_data {
62282 u32 pseudo_palette[256];
62283 int blank_mode; /*one of FB_BLANK_ */
62284 /* blit-only rendering path metrics, exposed through sysfs */
62285 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
62286 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
62287 - atomic_t bytes_sent; /* to usb, after compression including overhead */
62288 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
62289 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
62290 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
62291 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
62292 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
62293 };
62294
62295 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
62296 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
62297 index 0993a22..32ba2fe 100644
62298 --- a/include/video/uvesafb.h
62299 +++ b/include/video/uvesafb.h
62300 @@ -177,6 +177,7 @@ struct uvesafb_par {
62301 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
62302 u8 pmi_setpal; /* PMI for palette changes */
62303 u16 *pmi_base; /* protected mode interface location */
62304 + u8 *pmi_code; /* protected mode code location */
62305 void *pmi_start;
62306 void *pmi_pal;
62307 u8 *vbe_state_orig; /*
62308 diff --git a/init/Kconfig b/init/Kconfig
62309 index 43298f9..2f56c12 100644
62310 --- a/init/Kconfig
62311 +++ b/init/Kconfig
62312 @@ -1214,7 +1214,7 @@ config SLUB_DEBUG
62313
62314 config COMPAT_BRK
62315 bool "Disable heap randomization"
62316 - default y
62317 + default n
62318 help
62319 Randomizing heap placement makes heap exploits harder, but it
62320 also breaks ancient binaries (including anything libc5 based).
62321 diff --git a/init/do_mounts.c b/init/do_mounts.c
62322 index db6e5ee..7677ff7 100644
62323 --- a/init/do_mounts.c
62324 +++ b/init/do_mounts.c
62325 @@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
62326
62327 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
62328 {
62329 - int err = sys_mount(name, "/root", fs, flags, data);
62330 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
62331 if (err)
62332 return err;
62333
62334 - sys_chdir((const char __user __force *)"/root");
62335 + sys_chdir((const char __force_user*)"/root");
62336 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
62337 printk(KERN_INFO
62338 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
62339 @@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
62340 va_start(args, fmt);
62341 vsprintf(buf, fmt, args);
62342 va_end(args);
62343 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
62344 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
62345 if (fd >= 0) {
62346 sys_ioctl(fd, FDEJECT, 0);
62347 sys_close(fd);
62348 }
62349 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
62350 - fd = sys_open("/dev/console", O_RDWR, 0);
62351 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
62352 if (fd >= 0) {
62353 sys_ioctl(fd, TCGETS, (long)&termios);
62354 termios.c_lflag &= ~ICANON;
62355 sys_ioctl(fd, TCSETSF, (long)&termios);
62356 - sys_read(fd, &c, 1);
62357 + sys_read(fd, (char __user *)&c, 1);
62358 termios.c_lflag |= ICANON;
62359 sys_ioctl(fd, TCSETSF, (long)&termios);
62360 sys_close(fd);
62361 @@ -553,6 +553,6 @@ void __init prepare_namespace(void)
62362 mount_root();
62363 out:
62364 devtmpfs_mount("dev");
62365 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
62366 - sys_chroot((const char __user __force *)".");
62367 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62368 + sys_chroot((const char __force_user *)".");
62369 }
62370 diff --git a/init/do_mounts.h b/init/do_mounts.h
62371 index f5b978a..69dbfe8 100644
62372 --- a/init/do_mounts.h
62373 +++ b/init/do_mounts.h
62374 @@ -15,15 +15,15 @@ extern int root_mountflags;
62375
62376 static inline int create_dev(char *name, dev_t dev)
62377 {
62378 - sys_unlink(name);
62379 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
62380 + sys_unlink((char __force_user *)name);
62381 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
62382 }
62383
62384 #if BITS_PER_LONG == 32
62385 static inline u32 bstat(char *name)
62386 {
62387 struct stat64 stat;
62388 - if (sys_stat64(name, &stat) != 0)
62389 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
62390 return 0;
62391 if (!S_ISBLK(stat.st_mode))
62392 return 0;
62393 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
62394 static inline u32 bstat(char *name)
62395 {
62396 struct stat stat;
62397 - if (sys_newstat(name, &stat) != 0)
62398 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
62399 return 0;
62400 if (!S_ISBLK(stat.st_mode))
62401 return 0;
62402 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
62403 index 3098a38..253064e 100644
62404 --- a/init/do_mounts_initrd.c
62405 +++ b/init/do_mounts_initrd.c
62406 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
62407 create_dev("/dev/root.old", Root_RAM0);
62408 /* mount initrd on rootfs' /root */
62409 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
62410 - sys_mkdir("/old", 0700);
62411 - root_fd = sys_open("/", 0, 0);
62412 - old_fd = sys_open("/old", 0, 0);
62413 + sys_mkdir((const char __force_user *)"/old", 0700);
62414 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
62415 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
62416 /* move initrd over / and chdir/chroot in initrd root */
62417 - sys_chdir("/root");
62418 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
62419 - sys_chroot(".");
62420 + sys_chdir((const char __force_user *)"/root");
62421 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62422 + sys_chroot((const char __force_user *)".");
62423
62424 /*
62425 * In case that a resume from disk is carried out by linuxrc or one of
62426 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
62427
62428 /* move initrd to rootfs' /old */
62429 sys_fchdir(old_fd);
62430 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
62431 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
62432 /* switch root and cwd back to / of rootfs */
62433 sys_fchdir(root_fd);
62434 - sys_chroot(".");
62435 + sys_chroot((const char __force_user *)".");
62436 sys_close(old_fd);
62437 sys_close(root_fd);
62438
62439 if (new_decode_dev(real_root_dev) == Root_RAM0) {
62440 - sys_chdir("/old");
62441 + sys_chdir((const char __force_user *)"/old");
62442 return;
62443 }
62444
62445 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
62446 mount_root();
62447
62448 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
62449 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
62450 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
62451 if (!error)
62452 printk("okay\n");
62453 else {
62454 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
62455 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
62456 if (error == -ENOENT)
62457 printk("/initrd does not exist. Ignored.\n");
62458 else
62459 printk("failed\n");
62460 printk(KERN_NOTICE "Unmounting old root\n");
62461 - sys_umount("/old", MNT_DETACH);
62462 + sys_umount((char __force_user *)"/old", MNT_DETACH);
62463 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
62464 if (fd < 0) {
62465 error = fd;
62466 @@ -116,11 +116,11 @@ int __init initrd_load(void)
62467 * mounted in the normal path.
62468 */
62469 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62470 - sys_unlink("/initrd.image");
62471 + sys_unlink((const char __force_user *)"/initrd.image");
62472 handle_initrd();
62473 return 1;
62474 }
62475 }
62476 - sys_unlink("/initrd.image");
62477 + sys_unlink((const char __force_user *)"/initrd.image");
62478 return 0;
62479 }
62480 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
62481 index 32c4799..c27ee74 100644
62482 --- a/init/do_mounts_md.c
62483 +++ b/init/do_mounts_md.c
62484 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62485 partitioned ? "_d" : "", minor,
62486 md_setup_args[ent].device_names);
62487
62488 - fd = sys_open(name, 0, 0);
62489 + fd = sys_open((char __force_user *)name, 0, 0);
62490 if (fd < 0) {
62491 printk(KERN_ERR "md: open failed - cannot start "
62492 "array %s\n", name);
62493 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62494 * array without it
62495 */
62496 sys_close(fd);
62497 - fd = sys_open(name, 0, 0);
62498 + fd = sys_open((char __force_user *)name, 0, 0);
62499 sys_ioctl(fd, BLKRRPART, 0);
62500 }
62501 sys_close(fd);
62502 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62503
62504 wait_for_device_probe();
62505
62506 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
62507 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
62508 if (fd >= 0) {
62509 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62510 sys_close(fd);
62511 diff --git a/init/initramfs.c b/init/initramfs.c
62512 index 2531811..040d4d4 100644
62513 --- a/init/initramfs.c
62514 +++ b/init/initramfs.c
62515 @@ -74,7 +74,7 @@ static void __init free_hash(void)
62516 }
62517 }
62518
62519 -static long __init do_utime(char __user *filename, time_t mtime)
62520 +static long __init do_utime(__force char __user *filename, time_t mtime)
62521 {
62522 struct timespec t[2];
62523
62524 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
62525 struct dir_entry *de, *tmp;
62526 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62527 list_del(&de->list);
62528 - do_utime(de->name, de->mtime);
62529 + do_utime((char __force_user *)de->name, de->mtime);
62530 kfree(de->name);
62531 kfree(de);
62532 }
62533 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
62534 if (nlink >= 2) {
62535 char *old = find_link(major, minor, ino, mode, collected);
62536 if (old)
62537 - return (sys_link(old, collected) < 0) ? -1 : 1;
62538 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
62539 }
62540 return 0;
62541 }
62542 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
62543 {
62544 struct stat st;
62545
62546 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62547 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
62548 if (S_ISDIR(st.st_mode))
62549 - sys_rmdir(path);
62550 + sys_rmdir((char __force_user *)path);
62551 else
62552 - sys_unlink(path);
62553 + sys_unlink((char __force_user *)path);
62554 }
62555 }
62556
62557 @@ -305,7 +305,7 @@ static int __init do_name(void)
62558 int openflags = O_WRONLY|O_CREAT;
62559 if (ml != 1)
62560 openflags |= O_TRUNC;
62561 - wfd = sys_open(collected, openflags, mode);
62562 + wfd = sys_open((char __force_user *)collected, openflags, mode);
62563
62564 if (wfd >= 0) {
62565 sys_fchown(wfd, uid, gid);
62566 @@ -317,17 +317,17 @@ static int __init do_name(void)
62567 }
62568 }
62569 } else if (S_ISDIR(mode)) {
62570 - sys_mkdir(collected, mode);
62571 - sys_chown(collected, uid, gid);
62572 - sys_chmod(collected, mode);
62573 + sys_mkdir((char __force_user *)collected, mode);
62574 + sys_chown((char __force_user *)collected, uid, gid);
62575 + sys_chmod((char __force_user *)collected, mode);
62576 dir_add(collected, mtime);
62577 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62578 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62579 if (maybe_link() == 0) {
62580 - sys_mknod(collected, mode, rdev);
62581 - sys_chown(collected, uid, gid);
62582 - sys_chmod(collected, mode);
62583 - do_utime(collected, mtime);
62584 + sys_mknod((char __force_user *)collected, mode, rdev);
62585 + sys_chown((char __force_user *)collected, uid, gid);
62586 + sys_chmod((char __force_user *)collected, mode);
62587 + do_utime((char __force_user *)collected, mtime);
62588 }
62589 }
62590 return 0;
62591 @@ -336,15 +336,15 @@ static int __init do_name(void)
62592 static int __init do_copy(void)
62593 {
62594 if (count >= body_len) {
62595 - sys_write(wfd, victim, body_len);
62596 + sys_write(wfd, (char __force_user *)victim, body_len);
62597 sys_close(wfd);
62598 - do_utime(vcollected, mtime);
62599 + do_utime((char __force_user *)vcollected, mtime);
62600 kfree(vcollected);
62601 eat(body_len);
62602 state = SkipIt;
62603 return 0;
62604 } else {
62605 - sys_write(wfd, victim, count);
62606 + sys_write(wfd, (char __force_user *)victim, count);
62607 body_len -= count;
62608 eat(count);
62609 return 1;
62610 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
62611 {
62612 collected[N_ALIGN(name_len) + body_len] = '\0';
62613 clean_path(collected, 0);
62614 - sys_symlink(collected + N_ALIGN(name_len), collected);
62615 - sys_lchown(collected, uid, gid);
62616 - do_utime(collected, mtime);
62617 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
62618 + sys_lchown((char __force_user *)collected, uid, gid);
62619 + do_utime((char __force_user *)collected, mtime);
62620 state = SkipIt;
62621 next_state = Reset;
62622 return 0;
62623 diff --git a/init/main.c b/init/main.c
62624 index 217ed23..ec5406f 100644
62625 --- a/init/main.c
62626 +++ b/init/main.c
62627 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
62628 extern void tc_init(void);
62629 #endif
62630
62631 +extern void grsecurity_init(void);
62632 +
62633 /*
62634 * Debug helper: via this flag we know that we are in 'early bootup code'
62635 * where only the boot processor is running with IRQ disabled. This means
62636 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
62637
62638 __setup("reset_devices", set_reset_devices);
62639
62640 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62641 +extern char pax_enter_kernel_user[];
62642 +extern char pax_exit_kernel_user[];
62643 +extern pgdval_t clone_pgd_mask;
62644 +#endif
62645 +
62646 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62647 +static int __init setup_pax_nouderef(char *str)
62648 +{
62649 +#ifdef CONFIG_X86_32
62650 + unsigned int cpu;
62651 + struct desc_struct *gdt;
62652 +
62653 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
62654 + gdt = get_cpu_gdt_table(cpu);
62655 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62656 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62657 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62658 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62659 + }
62660 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62661 +#else
62662 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62663 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62664 + clone_pgd_mask = ~(pgdval_t)0UL;
62665 +#endif
62666 +
62667 + return 0;
62668 +}
62669 +early_param("pax_nouderef", setup_pax_nouderef);
62670 +#endif
62671 +
62672 +#ifdef CONFIG_PAX_SOFTMODE
62673 +int pax_softmode;
62674 +
62675 +static int __init setup_pax_softmode(char *str)
62676 +{
62677 + get_option(&str, &pax_softmode);
62678 + return 1;
62679 +}
62680 +__setup("pax_softmode=", setup_pax_softmode);
62681 +#endif
62682 +
62683 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62684 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62685 static const char *panic_later, *panic_param;
62686 @@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
62687 {
62688 int count = preempt_count();
62689 int ret;
62690 + const char *msg1 = "", *msg2 = "";
62691
62692 if (initcall_debug)
62693 ret = do_one_initcall_debug(fn);
62694 @@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
62695 sprintf(msgbuf, "error code %d ", ret);
62696
62697 if (preempt_count() != count) {
62698 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62699 + msg1 = " preemption imbalance";
62700 preempt_count() = count;
62701 }
62702 if (irqs_disabled()) {
62703 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62704 + msg2 = " disabled interrupts";
62705 local_irq_enable();
62706 }
62707 - if (msgbuf[0]) {
62708 - printk("initcall %pF returned with %s\n", fn, msgbuf);
62709 + if (msgbuf[0] || *msg1 || *msg2) {
62710 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62711 }
62712
62713 return ret;
62714 @@ -820,7 +866,7 @@ static int __init kernel_init(void * unused)
62715 do_basic_setup();
62716
62717 /* Open the /dev/console on the rootfs, this should never fail */
62718 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62719 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62720 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62721
62722 (void) sys_dup(0);
62723 @@ -833,11 +879,13 @@ static int __init kernel_init(void * unused)
62724 if (!ramdisk_execute_command)
62725 ramdisk_execute_command = "/init";
62726
62727 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62728 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62729 ramdisk_execute_command = NULL;
62730 prepare_namespace();
62731 }
62732
62733 + grsecurity_init();
62734 +
62735 /*
62736 * Ok, we have completed the initial bootup, and
62737 * we're essentially up and running. Get rid of the
62738 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
62739 index 5b4293d..f179875 100644
62740 --- a/ipc/mqueue.c
62741 +++ b/ipc/mqueue.c
62742 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
62743 mq_bytes = (mq_msg_tblsz +
62744 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62745
62746 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62747 spin_lock(&mq_lock);
62748 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62749 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62750 diff --git a/ipc/msg.c b/ipc/msg.c
62751 index 7385de2..a8180e08 100644
62752 --- a/ipc/msg.c
62753 +++ b/ipc/msg.c
62754 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
62755 return security_msg_queue_associate(msq, msgflg);
62756 }
62757
62758 +static struct ipc_ops msg_ops = {
62759 + .getnew = newque,
62760 + .associate = msg_security,
62761 + .more_checks = NULL
62762 +};
62763 +
62764 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62765 {
62766 struct ipc_namespace *ns;
62767 - struct ipc_ops msg_ops;
62768 struct ipc_params msg_params;
62769
62770 ns = current->nsproxy->ipc_ns;
62771
62772 - msg_ops.getnew = newque;
62773 - msg_ops.associate = msg_security;
62774 - msg_ops.more_checks = NULL;
62775 -
62776 msg_params.key = key;
62777 msg_params.flg = msgflg;
62778
62779 diff --git a/ipc/sem.c b/ipc/sem.c
62780 index 5215a81..cfc0cac 100644
62781 --- a/ipc/sem.c
62782 +++ b/ipc/sem.c
62783 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
62784 return 0;
62785 }
62786
62787 +static struct ipc_ops sem_ops = {
62788 + .getnew = newary,
62789 + .associate = sem_security,
62790 + .more_checks = sem_more_checks
62791 +};
62792 +
62793 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62794 {
62795 struct ipc_namespace *ns;
62796 - struct ipc_ops sem_ops;
62797 struct ipc_params sem_params;
62798
62799 ns = current->nsproxy->ipc_ns;
62800 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62801 if (nsems < 0 || nsems > ns->sc_semmsl)
62802 return -EINVAL;
62803
62804 - sem_ops.getnew = newary;
62805 - sem_ops.associate = sem_security;
62806 - sem_ops.more_checks = sem_more_checks;
62807 -
62808 sem_params.key = key;
62809 sem_params.flg = semflg;
62810 sem_params.u.nsems = nsems;
62811 diff --git a/ipc/shm.c b/ipc/shm.c
62812 index b76be5b..859e750 100644
62813 --- a/ipc/shm.c
62814 +++ b/ipc/shm.c
62815 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
62816 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62817 #endif
62818
62819 +#ifdef CONFIG_GRKERNSEC
62820 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62821 + const time_t shm_createtime, const uid_t cuid,
62822 + const int shmid);
62823 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62824 + const time_t shm_createtime);
62825 +#endif
62826 +
62827 void shm_init_ns(struct ipc_namespace *ns)
62828 {
62829 ns->shm_ctlmax = SHMMAX;
62830 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
62831 shp->shm_lprid = 0;
62832 shp->shm_atim = shp->shm_dtim = 0;
62833 shp->shm_ctim = get_seconds();
62834 +#ifdef CONFIG_GRKERNSEC
62835 + {
62836 + struct timespec timeval;
62837 + do_posix_clock_monotonic_gettime(&timeval);
62838 +
62839 + shp->shm_createtime = timeval.tv_sec;
62840 + }
62841 +#endif
62842 shp->shm_segsz = size;
62843 shp->shm_nattch = 0;
62844 shp->shm_file = file;
62845 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
62846 return 0;
62847 }
62848
62849 +static struct ipc_ops shm_ops = {
62850 + .getnew = newseg,
62851 + .associate = shm_security,
62852 + .more_checks = shm_more_checks
62853 +};
62854 +
62855 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62856 {
62857 struct ipc_namespace *ns;
62858 - struct ipc_ops shm_ops;
62859 struct ipc_params shm_params;
62860
62861 ns = current->nsproxy->ipc_ns;
62862
62863 - shm_ops.getnew = newseg;
62864 - shm_ops.associate = shm_security;
62865 - shm_ops.more_checks = shm_more_checks;
62866 -
62867 shm_params.key = key;
62868 shm_params.flg = shmflg;
62869 shm_params.u.size = size;
62870 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62871 f_mode = FMODE_READ | FMODE_WRITE;
62872 }
62873 if (shmflg & SHM_EXEC) {
62874 +
62875 +#ifdef CONFIG_PAX_MPROTECT
62876 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
62877 + goto out;
62878 +#endif
62879 +
62880 prot |= PROT_EXEC;
62881 acc_mode |= S_IXUGO;
62882 }
62883 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62884 if (err)
62885 goto out_unlock;
62886
62887 +#ifdef CONFIG_GRKERNSEC
62888 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62889 + shp->shm_perm.cuid, shmid) ||
62890 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62891 + err = -EACCES;
62892 + goto out_unlock;
62893 + }
62894 +#endif
62895 +
62896 path = shp->shm_file->f_path;
62897 path_get(&path);
62898 shp->shm_nattch++;
62899 +#ifdef CONFIG_GRKERNSEC
62900 + shp->shm_lapid = current->pid;
62901 +#endif
62902 size = i_size_read(path.dentry->d_inode);
62903 shm_unlock(shp);
62904
62905 diff --git a/kernel/acct.c b/kernel/acct.c
62906 index fa7eb3d..7faf116 100644
62907 --- a/kernel/acct.c
62908 +++ b/kernel/acct.c
62909 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
62910 */
62911 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62912 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62913 - file->f_op->write(file, (char *)&ac,
62914 + file->f_op->write(file, (char __force_user *)&ac,
62915 sizeof(acct_t), &file->f_pos);
62916 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62917 set_fs(fs);
62918 diff --git a/kernel/audit.c b/kernel/audit.c
62919 index 09fae26..ed71d5b 100644
62920 --- a/kernel/audit.c
62921 +++ b/kernel/audit.c
62922 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62923 3) suppressed due to audit_rate_limit
62924 4) suppressed due to audit_backlog_limit
62925 */
62926 -static atomic_t audit_lost = ATOMIC_INIT(0);
62927 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62928
62929 /* The netlink socket. */
62930 static struct sock *audit_sock;
62931 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62932 unsigned long now;
62933 int print;
62934
62935 - atomic_inc(&audit_lost);
62936 + atomic_inc_unchecked(&audit_lost);
62937
62938 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62939
62940 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62941 printk(KERN_WARNING
62942 "audit: audit_lost=%d audit_rate_limit=%d "
62943 "audit_backlog_limit=%d\n",
62944 - atomic_read(&audit_lost),
62945 + atomic_read_unchecked(&audit_lost),
62946 audit_rate_limit,
62947 audit_backlog_limit);
62948 audit_panic(message);
62949 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
62950 status_set.pid = audit_pid;
62951 status_set.rate_limit = audit_rate_limit;
62952 status_set.backlog_limit = audit_backlog_limit;
62953 - status_set.lost = atomic_read(&audit_lost);
62954 + status_set.lost = atomic_read_unchecked(&audit_lost);
62955 status_set.backlog = skb_queue_len(&audit_skb_queue);
62956 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62957 &status_set, sizeof(status_set));
62958 @@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
62959 avail = audit_expand(ab,
62960 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
62961 if (!avail)
62962 - goto out;
62963 + goto out_va_end;
62964 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
62965 }
62966 - va_end(args2);
62967 if (len > 0)
62968 skb_put(skb, len);
62969 +out_va_end:
62970 + va_end(args2);
62971 out:
62972 return;
62973 }
62974 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
62975 index 47b7fc1..c003c33 100644
62976 --- a/kernel/auditsc.c
62977 +++ b/kernel/auditsc.c
62978 @@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
62979 struct audit_buffer **ab,
62980 struct audit_aux_data_execve *axi)
62981 {
62982 - int i;
62983 - size_t len, len_sent = 0;
62984 + int i, len;
62985 + size_t len_sent = 0;
62986 const char __user *p;
62987 char *buf;
62988
62989 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
62990 }
62991
62992 /* global counter which is incremented every time something logs in */
62993 -static atomic_t session_id = ATOMIC_INIT(0);
62994 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62995
62996 /**
62997 * audit_set_loginuid - set a task's audit_context loginuid
62998 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
62999 */
63000 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
63001 {
63002 - unsigned int sessionid = atomic_inc_return(&session_id);
63003 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
63004 struct audit_context *context = task->audit_context;
63005
63006 if (context && context->in_syscall) {
63007 diff --git a/kernel/capability.c b/kernel/capability.c
63008 index b463871..fa3ea1f 100644
63009 --- a/kernel/capability.c
63010 +++ b/kernel/capability.c
63011 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
63012 * before modification is attempted and the application
63013 * fails.
63014 */
63015 + if (tocopy > ARRAY_SIZE(kdata))
63016 + return -EFAULT;
63017 +
63018 if (copy_to_user(dataptr, kdata, tocopy
63019 * sizeof(struct __user_cap_data_struct))) {
63020 return -EFAULT;
63021 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
63022 BUG();
63023 }
63024
63025 - if (security_capable(ns, current_cred(), cap) == 0) {
63026 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
63027 current->flags |= PF_SUPERPRIV;
63028 return true;
63029 }
63030 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
63031 }
63032 EXPORT_SYMBOL(ns_capable);
63033
63034 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
63035 +{
63036 + if (unlikely(!cap_valid(cap))) {
63037 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
63038 + BUG();
63039 + }
63040 +
63041 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
63042 + current->flags |= PF_SUPERPRIV;
63043 + return true;
63044 + }
63045 + return false;
63046 +}
63047 +EXPORT_SYMBOL(ns_capable_nolog);
63048 +
63049 +bool capable_nolog(int cap)
63050 +{
63051 + return ns_capable_nolog(&init_user_ns, cap);
63052 +}
63053 +EXPORT_SYMBOL(capable_nolog);
63054 +
63055 /**
63056 * task_ns_capable - Determine whether current task has a superior
63057 * capability targeted at a specific task's user namespace.
63058 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
63059 }
63060 EXPORT_SYMBOL(task_ns_capable);
63061
63062 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
63063 +{
63064 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
63065 +}
63066 +EXPORT_SYMBOL(task_ns_capable_nolog);
63067 +
63068 /**
63069 * nsown_capable - Check superior capability to one's own user_ns
63070 * @cap: The capability in question
63071 diff --git a/kernel/compat.c b/kernel/compat.c
63072 index f346ced..aa2b1f4 100644
63073 --- a/kernel/compat.c
63074 +++ b/kernel/compat.c
63075 @@ -13,6 +13,7 @@
63076
63077 #include <linux/linkage.h>
63078 #include <linux/compat.h>
63079 +#include <linux/module.h>
63080 #include <linux/errno.h>
63081 #include <linux/time.h>
63082 #include <linux/signal.h>
63083 @@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
63084 mm_segment_t oldfs;
63085 long ret;
63086
63087 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
63088 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
63089 oldfs = get_fs();
63090 set_fs(KERNEL_DS);
63091 ret = hrtimer_nanosleep_restart(restart);
63092 @@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
63093 oldfs = get_fs();
63094 set_fs(KERNEL_DS);
63095 ret = hrtimer_nanosleep(&tu,
63096 - rmtp ? (struct timespec __user *)&rmt : NULL,
63097 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
63098 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
63099 set_fs(oldfs);
63100
63101 @@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
63102 mm_segment_t old_fs = get_fs();
63103
63104 set_fs(KERNEL_DS);
63105 - ret = sys_sigpending((old_sigset_t __user *) &s);
63106 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
63107 set_fs(old_fs);
63108 if (ret == 0)
63109 ret = put_user(s, set);
63110 @@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
63111 old_fs = get_fs();
63112 set_fs(KERNEL_DS);
63113 ret = sys_sigprocmask(how,
63114 - set ? (old_sigset_t __user *) &s : NULL,
63115 - oset ? (old_sigset_t __user *) &s : NULL);
63116 + set ? (old_sigset_t __force_user *) &s : NULL,
63117 + oset ? (old_sigset_t __force_user *) &s : NULL);
63118 set_fs(old_fs);
63119 if (ret == 0)
63120 if (oset)
63121 @@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
63122 mm_segment_t old_fs = get_fs();
63123
63124 set_fs(KERNEL_DS);
63125 - ret = sys_old_getrlimit(resource, &r);
63126 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
63127 set_fs(old_fs);
63128
63129 if (!ret) {
63130 @@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
63131 mm_segment_t old_fs = get_fs();
63132
63133 set_fs(KERNEL_DS);
63134 - ret = sys_getrusage(who, (struct rusage __user *) &r);
63135 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
63136 set_fs(old_fs);
63137
63138 if (ret)
63139 @@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
63140 set_fs (KERNEL_DS);
63141 ret = sys_wait4(pid,
63142 (stat_addr ?
63143 - (unsigned int __user *) &status : NULL),
63144 - options, (struct rusage __user *) &r);
63145 + (unsigned int __force_user *) &status : NULL),
63146 + options, (struct rusage __force_user *) &r);
63147 set_fs (old_fs);
63148
63149 if (ret > 0) {
63150 @@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
63151 memset(&info, 0, sizeof(info));
63152
63153 set_fs(KERNEL_DS);
63154 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
63155 - uru ? (struct rusage __user *)&ru : NULL);
63156 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
63157 + uru ? (struct rusage __force_user *)&ru : NULL);
63158 set_fs(old_fs);
63159
63160 if ((ret < 0) || (info.si_signo == 0))
63161 @@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
63162 oldfs = get_fs();
63163 set_fs(KERNEL_DS);
63164 err = sys_timer_settime(timer_id, flags,
63165 - (struct itimerspec __user *) &newts,
63166 - (struct itimerspec __user *) &oldts);
63167 + (struct itimerspec __force_user *) &newts,
63168 + (struct itimerspec __force_user *) &oldts);
63169 set_fs(oldfs);
63170 if (!err && old && put_compat_itimerspec(old, &oldts))
63171 return -EFAULT;
63172 @@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
63173 oldfs = get_fs();
63174 set_fs(KERNEL_DS);
63175 err = sys_timer_gettime(timer_id,
63176 - (struct itimerspec __user *) &ts);
63177 + (struct itimerspec __force_user *) &ts);
63178 set_fs(oldfs);
63179 if (!err && put_compat_itimerspec(setting, &ts))
63180 return -EFAULT;
63181 @@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
63182 oldfs = get_fs();
63183 set_fs(KERNEL_DS);
63184 err = sys_clock_settime(which_clock,
63185 - (struct timespec __user *) &ts);
63186 + (struct timespec __force_user *) &ts);
63187 set_fs(oldfs);
63188 return err;
63189 }
63190 @@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
63191 oldfs = get_fs();
63192 set_fs(KERNEL_DS);
63193 err = sys_clock_gettime(which_clock,
63194 - (struct timespec __user *) &ts);
63195 + (struct timespec __force_user *) &ts);
63196 set_fs(oldfs);
63197 if (!err && put_compat_timespec(&ts, tp))
63198 return -EFAULT;
63199 @@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
63200
63201 oldfs = get_fs();
63202 set_fs(KERNEL_DS);
63203 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
63204 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
63205 set_fs(oldfs);
63206
63207 err = compat_put_timex(utp, &txc);
63208 @@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
63209 oldfs = get_fs();
63210 set_fs(KERNEL_DS);
63211 err = sys_clock_getres(which_clock,
63212 - (struct timespec __user *) &ts);
63213 + (struct timespec __force_user *) &ts);
63214 set_fs(oldfs);
63215 if (!err && tp && put_compat_timespec(&ts, tp))
63216 return -EFAULT;
63217 @@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
63218 long err;
63219 mm_segment_t oldfs;
63220 struct timespec tu;
63221 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
63222 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
63223
63224 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
63225 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
63226 oldfs = get_fs();
63227 set_fs(KERNEL_DS);
63228 err = clock_nanosleep_restart(restart);
63229 @@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
63230 oldfs = get_fs();
63231 set_fs(KERNEL_DS);
63232 err = sys_clock_nanosleep(which_clock, flags,
63233 - (struct timespec __user *) &in,
63234 - (struct timespec __user *) &out);
63235 + (struct timespec __force_user *) &in,
63236 + (struct timespec __force_user *) &out);
63237 set_fs(oldfs);
63238
63239 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
63240 diff --git a/kernel/configs.c b/kernel/configs.c
63241 index 42e8fa0..9e7406b 100644
63242 --- a/kernel/configs.c
63243 +++ b/kernel/configs.c
63244 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
63245 struct proc_dir_entry *entry;
63246
63247 /* create the current config file */
63248 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
63249 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
63250 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
63251 + &ikconfig_file_ops);
63252 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63253 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
63254 + &ikconfig_file_ops);
63255 +#endif
63256 +#else
63257 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
63258 &ikconfig_file_ops);
63259 +#endif
63260 +
63261 if (!entry)
63262 return -ENOMEM;
63263
63264 diff --git a/kernel/cred.c b/kernel/cred.c
63265 index 5791612..a3c04dc 100644
63266 --- a/kernel/cred.c
63267 +++ b/kernel/cred.c
63268 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
63269 validate_creds(cred);
63270 put_cred(cred);
63271 }
63272 +
63273 +#ifdef CONFIG_GRKERNSEC_SETXID
63274 + cred = (struct cred *) tsk->delayed_cred;
63275 + if (cred) {
63276 + tsk->delayed_cred = NULL;
63277 + validate_creds(cred);
63278 + put_cred(cred);
63279 + }
63280 +#endif
63281 }
63282
63283 /**
63284 @@ -470,7 +479,7 @@ error_put:
63285 * Always returns 0 thus allowing this function to be tail-called at the end
63286 * of, say, sys_setgid().
63287 */
63288 -int commit_creds(struct cred *new)
63289 +static int __commit_creds(struct cred *new)
63290 {
63291 struct task_struct *task = current;
63292 const struct cred *old = task->real_cred;
63293 @@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
63294
63295 get_cred(new); /* we will require a ref for the subj creds too */
63296
63297 + gr_set_role_label(task, new->uid, new->gid);
63298 +
63299 /* dumpability changes */
63300 if (old->euid != new->euid ||
63301 old->egid != new->egid ||
63302 @@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
63303 put_cred(old);
63304 return 0;
63305 }
63306 +#ifdef CONFIG_GRKERNSEC_SETXID
63307 +extern int set_user(struct cred *new);
63308 +
63309 +void gr_delayed_cred_worker(void)
63310 +{
63311 + const struct cred *new = current->delayed_cred;
63312 + struct cred *ncred;
63313 +
63314 + current->delayed_cred = NULL;
63315 +
63316 + if (current_uid() && new != NULL) {
63317 + // from doing get_cred on it when queueing this
63318 + put_cred(new);
63319 + return;
63320 + } else if (new == NULL)
63321 + return;
63322 +
63323 + ncred = prepare_creds();
63324 + if (!ncred)
63325 + goto die;
63326 + // uids
63327 + ncred->uid = new->uid;
63328 + ncred->euid = new->euid;
63329 + ncred->suid = new->suid;
63330 + ncred->fsuid = new->fsuid;
63331 + // gids
63332 + ncred->gid = new->gid;
63333 + ncred->egid = new->egid;
63334 + ncred->sgid = new->sgid;
63335 + ncred->fsgid = new->fsgid;
63336 + // groups
63337 + if (set_groups(ncred, new->group_info) < 0) {
63338 + abort_creds(ncred);
63339 + goto die;
63340 + }
63341 + // caps
63342 + ncred->securebits = new->securebits;
63343 + ncred->cap_inheritable = new->cap_inheritable;
63344 + ncred->cap_permitted = new->cap_permitted;
63345 + ncred->cap_effective = new->cap_effective;
63346 + ncred->cap_bset = new->cap_bset;
63347 +
63348 + if (set_user(ncred)) {
63349 + abort_creds(ncred);
63350 + goto die;
63351 + }
63352 +
63353 + // from doing get_cred on it when queueing this
63354 + put_cred(new);
63355 +
63356 + __commit_creds(ncred);
63357 + return;
63358 +die:
63359 + // from doing get_cred on it when queueing this
63360 + put_cred(new);
63361 + do_group_exit(SIGKILL);
63362 +}
63363 +#endif
63364 +
63365 +int commit_creds(struct cred *new)
63366 +{
63367 +#ifdef CONFIG_GRKERNSEC_SETXID
63368 + struct task_struct *t;
63369 +
63370 + /* we won't get called with tasklist_lock held for writing
63371 + and interrupts disabled as the cred struct in that case is
63372 + init_cred
63373 + */
63374 + if (grsec_enable_setxid && !current_is_single_threaded() &&
63375 + !current_uid() && new->uid) {
63376 + rcu_read_lock();
63377 + read_lock(&tasklist_lock);
63378 + for (t = next_thread(current); t != current;
63379 + t = next_thread(t)) {
63380 + if (t->delayed_cred == NULL) {
63381 + t->delayed_cred = get_cred(new);
63382 + set_tsk_need_resched(t);
63383 + }
63384 + }
63385 + read_unlock(&tasklist_lock);
63386 + rcu_read_unlock();
63387 + }
63388 +#endif
63389 + return __commit_creds(new);
63390 +}
63391 +
63392 EXPORT_SYMBOL(commit_creds);
63393
63394 /**
63395 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
63396 index 0d7c087..01b8cef 100644
63397 --- a/kernel/debug/debug_core.c
63398 +++ b/kernel/debug/debug_core.c
63399 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
63400 */
63401 static atomic_t masters_in_kgdb;
63402 static atomic_t slaves_in_kgdb;
63403 -static atomic_t kgdb_break_tasklet_var;
63404 +static atomic_unchecked_t kgdb_break_tasklet_var;
63405 atomic_t kgdb_setting_breakpoint;
63406
63407 struct task_struct *kgdb_usethread;
63408 @@ -129,7 +129,7 @@ int kgdb_single_step;
63409 static pid_t kgdb_sstep_pid;
63410
63411 /* to keep track of the CPU which is doing the single stepping*/
63412 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63413 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63414
63415 /*
63416 * If you are debugging a problem where roundup (the collection of
63417 @@ -542,7 +542,7 @@ return_normal:
63418 * kernel will only try for the value of sstep_tries before
63419 * giving up and continuing on.
63420 */
63421 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63422 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63423 (kgdb_info[cpu].task &&
63424 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
63425 atomic_set(&kgdb_active, -1);
63426 @@ -636,8 +636,8 @@ cpu_master_loop:
63427 }
63428
63429 kgdb_restore:
63430 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
63431 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
63432 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
63433 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
63434 if (kgdb_info[sstep_cpu].task)
63435 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
63436 else
63437 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
63438 static void kgdb_tasklet_bpt(unsigned long ing)
63439 {
63440 kgdb_breakpoint();
63441 - atomic_set(&kgdb_break_tasklet_var, 0);
63442 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
63443 }
63444
63445 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
63446
63447 void kgdb_schedule_breakpoint(void)
63448 {
63449 - if (atomic_read(&kgdb_break_tasklet_var) ||
63450 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
63451 atomic_read(&kgdb_active) != -1 ||
63452 atomic_read(&kgdb_setting_breakpoint))
63453 return;
63454 - atomic_inc(&kgdb_break_tasklet_var);
63455 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
63456 tasklet_schedule(&kgdb_tasklet_breakpoint);
63457 }
63458 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
63459 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
63460 index 63786e7..0780cac 100644
63461 --- a/kernel/debug/kdb/kdb_main.c
63462 +++ b/kernel/debug/kdb/kdb_main.c
63463 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
63464 list_for_each_entry(mod, kdb_modules, list) {
63465
63466 kdb_printf("%-20s%8u 0x%p ", mod->name,
63467 - mod->core_size, (void *)mod);
63468 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
63469 #ifdef CONFIG_MODULE_UNLOAD
63470 kdb_printf("%4d ", module_refcount(mod));
63471 #endif
63472 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
63473 kdb_printf(" (Loading)");
63474 else
63475 kdb_printf(" (Live)");
63476 - kdb_printf(" 0x%p", mod->module_core);
63477 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63478
63479 #ifdef CONFIG_MODULE_UNLOAD
63480 {
63481 diff --git a/kernel/events/core.c b/kernel/events/core.c
63482 index 58690af..d903d75 100644
63483 --- a/kernel/events/core.c
63484 +++ b/kernel/events/core.c
63485 @@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
63486 return 0;
63487 }
63488
63489 -static atomic64_t perf_event_id;
63490 +static atomic64_unchecked_t perf_event_id;
63491
63492 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
63493 enum event_type_t event_type);
63494 @@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info)
63495
63496 static inline u64 perf_event_count(struct perf_event *event)
63497 {
63498 - return local64_read(&event->count) + atomic64_read(&event->child_count);
63499 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
63500 }
63501
63502 static u64 perf_event_read(struct perf_event *event)
63503 @@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
63504 mutex_lock(&event->child_mutex);
63505 total += perf_event_read(event);
63506 *enabled += event->total_time_enabled +
63507 - atomic64_read(&event->child_total_time_enabled);
63508 + atomic64_read_unchecked(&event->child_total_time_enabled);
63509 *running += event->total_time_running +
63510 - atomic64_read(&event->child_total_time_running);
63511 + atomic64_read_unchecked(&event->child_total_time_running);
63512
63513 list_for_each_entry(child, &event->child_list, child_list) {
63514 total += perf_event_read(child);
63515 @@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
63516 userpg->offset -= local64_read(&event->hw.prev_count);
63517
63518 userpg->time_enabled = enabled +
63519 - atomic64_read(&event->child_total_time_enabled);
63520 + atomic64_read_unchecked(&event->child_total_time_enabled);
63521
63522 userpg->time_running = running +
63523 - atomic64_read(&event->child_total_time_running);
63524 + atomic64_read_unchecked(&event->child_total_time_running);
63525
63526 barrier();
63527 ++userpg->lock;
63528 @@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
63529 values[n++] = perf_event_count(event);
63530 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
63531 values[n++] = enabled +
63532 - atomic64_read(&event->child_total_time_enabled);
63533 + atomic64_read_unchecked(&event->child_total_time_enabled);
63534 }
63535 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
63536 values[n++] = running +
63537 - atomic64_read(&event->child_total_time_running);
63538 + atomic64_read_unchecked(&event->child_total_time_running);
63539 }
63540 if (read_format & PERF_FORMAT_ID)
63541 values[n++] = primary_event_id(event);
63542 @@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
63543 * need to add enough zero bytes after the string to handle
63544 * the 64bit alignment we do later.
63545 */
63546 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
63547 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
63548 if (!buf) {
63549 name = strncpy(tmp, "//enomem", sizeof(tmp));
63550 goto got_name;
63551 }
63552 - name = d_path(&file->f_path, buf, PATH_MAX);
63553 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
63554 if (IS_ERR(name)) {
63555 name = strncpy(tmp, "//toolong", sizeof(tmp));
63556 goto got_name;
63557 @@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
63558 event->parent = parent_event;
63559
63560 event->ns = get_pid_ns(current->nsproxy->pid_ns);
63561 - event->id = atomic64_inc_return(&perf_event_id);
63562 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
63563
63564 event->state = PERF_EVENT_STATE_INACTIVE;
63565
63566 @@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
63567 /*
63568 * Add back the child's count to the parent's count:
63569 */
63570 - atomic64_add(child_val, &parent_event->child_count);
63571 - atomic64_add(child_event->total_time_enabled,
63572 + atomic64_add_unchecked(child_val, &parent_event->child_count);
63573 + atomic64_add_unchecked(child_event->total_time_enabled,
63574 &parent_event->child_total_time_enabled);
63575 - atomic64_add(child_event->total_time_running,
63576 + atomic64_add_unchecked(child_event->total_time_running,
63577 &parent_event->child_total_time_running);
63578
63579 /*
63580 diff --git a/kernel/exit.c b/kernel/exit.c
63581 index e6e01b9..619f837 100644
63582 --- a/kernel/exit.c
63583 +++ b/kernel/exit.c
63584 @@ -57,6 +57,10 @@
63585 #include <asm/pgtable.h>
63586 #include <asm/mmu_context.h>
63587
63588 +#ifdef CONFIG_GRKERNSEC
63589 +extern rwlock_t grsec_exec_file_lock;
63590 +#endif
63591 +
63592 static void exit_mm(struct task_struct * tsk);
63593
63594 static void __unhash_process(struct task_struct *p, bool group_dead)
63595 @@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
63596 struct task_struct *leader;
63597 int zap_leader;
63598 repeat:
63599 +#ifdef CONFIG_NET
63600 + gr_del_task_from_ip_table(p);
63601 +#endif
63602 +
63603 /* don't need to get the RCU readlock here - the process is dead and
63604 * can't be modifying its own credentials. But shut RCU-lockdep up */
63605 rcu_read_lock();
63606 @@ -380,7 +388,7 @@ int allow_signal(int sig)
63607 * know it'll be handled, so that they don't get converted to
63608 * SIGKILL or just silently dropped.
63609 */
63610 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63611 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63612 recalc_sigpending();
63613 spin_unlock_irq(&current->sighand->siglock);
63614 return 0;
63615 @@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
63616 vsnprintf(current->comm, sizeof(current->comm), name, args);
63617 va_end(args);
63618
63619 +#ifdef CONFIG_GRKERNSEC
63620 + write_lock(&grsec_exec_file_lock);
63621 + if (current->exec_file) {
63622 + fput(current->exec_file);
63623 + current->exec_file = NULL;
63624 + }
63625 + write_unlock(&grsec_exec_file_lock);
63626 +#endif
63627 +
63628 + gr_set_kernel_label(current);
63629 +
63630 /*
63631 * If we were started as result of loading a module, close all of the
63632 * user space pages. We don't need them, and if we didn't close them
63633 @@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
63634 struct task_struct *tsk = current;
63635 int group_dead;
63636
63637 + set_fs(USER_DS);
63638 +
63639 profile_task_exit(tsk);
63640
63641 WARN_ON(blk_needs_flush_plug(tsk));
63642 @@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
63643 * mm_release()->clear_child_tid() from writing to a user-controlled
63644 * kernel address.
63645 */
63646 - set_fs(USER_DS);
63647
63648 ptrace_event(PTRACE_EVENT_EXIT, code);
63649
63650 @@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
63651 tsk->exit_code = code;
63652 taskstats_exit(tsk, group_dead);
63653
63654 + gr_acl_handle_psacct(tsk, code);
63655 + gr_acl_handle_exit();
63656 +
63657 exit_mm(tsk);
63658
63659 if (group_dead)
63660 diff --git a/kernel/fork.c b/kernel/fork.c
63661 index 0acf42c0..9e40e2e 100644
63662 --- a/kernel/fork.c
63663 +++ b/kernel/fork.c
63664 @@ -281,7 +281,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
63665 *stackend = STACK_END_MAGIC; /* for overflow detection */
63666
63667 #ifdef CONFIG_CC_STACKPROTECTOR
63668 - tsk->stack_canary = get_random_int();
63669 + tsk->stack_canary = pax_get_random_long();
63670 #endif
63671
63672 /*
63673 @@ -305,13 +305,77 @@ out:
63674 }
63675
63676 #ifdef CONFIG_MMU
63677 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63678 +{
63679 + struct vm_area_struct *tmp;
63680 + unsigned long charge;
63681 + struct mempolicy *pol;
63682 + struct file *file;
63683 +
63684 + charge = 0;
63685 + if (mpnt->vm_flags & VM_ACCOUNT) {
63686 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63687 + if (security_vm_enough_memory(len))
63688 + goto fail_nomem;
63689 + charge = len;
63690 + }
63691 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63692 + if (!tmp)
63693 + goto fail_nomem;
63694 + *tmp = *mpnt;
63695 + tmp->vm_mm = mm;
63696 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
63697 + pol = mpol_dup(vma_policy(mpnt));
63698 + if (IS_ERR(pol))
63699 + goto fail_nomem_policy;
63700 + vma_set_policy(tmp, pol);
63701 + if (anon_vma_fork(tmp, mpnt))
63702 + goto fail_nomem_anon_vma_fork;
63703 + tmp->vm_flags &= ~VM_LOCKED;
63704 + tmp->vm_next = tmp->vm_prev = NULL;
63705 + tmp->vm_mirror = NULL;
63706 + file = tmp->vm_file;
63707 + if (file) {
63708 + struct inode *inode = file->f_path.dentry->d_inode;
63709 + struct address_space *mapping = file->f_mapping;
63710 +
63711 + get_file(file);
63712 + if (tmp->vm_flags & VM_DENYWRITE)
63713 + atomic_dec(&inode->i_writecount);
63714 + mutex_lock(&mapping->i_mmap_mutex);
63715 + if (tmp->vm_flags & VM_SHARED)
63716 + mapping->i_mmap_writable++;
63717 + flush_dcache_mmap_lock(mapping);
63718 + /* insert tmp into the share list, just after mpnt */
63719 + vma_prio_tree_add(tmp, mpnt);
63720 + flush_dcache_mmap_unlock(mapping);
63721 + mutex_unlock(&mapping->i_mmap_mutex);
63722 + }
63723 +
63724 + /*
63725 + * Clear hugetlb-related page reserves for children. This only
63726 + * affects MAP_PRIVATE mappings. Faults generated by the child
63727 + * are not guaranteed to succeed, even if read-only
63728 + */
63729 + if (is_vm_hugetlb_page(tmp))
63730 + reset_vma_resv_huge_pages(tmp);
63731 +
63732 + return tmp;
63733 +
63734 +fail_nomem_anon_vma_fork:
63735 + mpol_put(pol);
63736 +fail_nomem_policy:
63737 + kmem_cache_free(vm_area_cachep, tmp);
63738 +fail_nomem:
63739 + vm_unacct_memory(charge);
63740 + return NULL;
63741 +}
63742 +
63743 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63744 {
63745 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63746 struct rb_node **rb_link, *rb_parent;
63747 int retval;
63748 - unsigned long charge;
63749 - struct mempolicy *pol;
63750
63751 down_write(&oldmm->mmap_sem);
63752 flush_cache_dup_mm(oldmm);
63753 @@ -323,8 +387,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63754 mm->locked_vm = 0;
63755 mm->mmap = NULL;
63756 mm->mmap_cache = NULL;
63757 - mm->free_area_cache = oldmm->mmap_base;
63758 - mm->cached_hole_size = ~0UL;
63759 + mm->free_area_cache = oldmm->free_area_cache;
63760 + mm->cached_hole_size = oldmm->cached_hole_size;
63761 mm->map_count = 0;
63762 cpumask_clear(mm_cpumask(mm));
63763 mm->mm_rb = RB_ROOT;
63764 @@ -340,8 +404,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63765
63766 prev = NULL;
63767 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63768 - struct file *file;
63769 -
63770 if (mpnt->vm_flags & VM_DONTCOPY) {
63771 long pages = vma_pages(mpnt);
63772 mm->total_vm -= pages;
63773 @@ -349,53 +411,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63774 -pages);
63775 continue;
63776 }
63777 - charge = 0;
63778 - if (mpnt->vm_flags & VM_ACCOUNT) {
63779 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63780 - if (security_vm_enough_memory(len))
63781 - goto fail_nomem;
63782 - charge = len;
63783 + tmp = dup_vma(mm, mpnt);
63784 + if (!tmp) {
63785 + retval = -ENOMEM;
63786 + goto out;
63787 }
63788 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63789 - if (!tmp)
63790 - goto fail_nomem;
63791 - *tmp = *mpnt;
63792 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
63793 - pol = mpol_dup(vma_policy(mpnt));
63794 - retval = PTR_ERR(pol);
63795 - if (IS_ERR(pol))
63796 - goto fail_nomem_policy;
63797 - vma_set_policy(tmp, pol);
63798 - tmp->vm_mm = mm;
63799 - if (anon_vma_fork(tmp, mpnt))
63800 - goto fail_nomem_anon_vma_fork;
63801 - tmp->vm_flags &= ~VM_LOCKED;
63802 - tmp->vm_next = tmp->vm_prev = NULL;
63803 - file = tmp->vm_file;
63804 - if (file) {
63805 - struct inode *inode = file->f_path.dentry->d_inode;
63806 - struct address_space *mapping = file->f_mapping;
63807 -
63808 - get_file(file);
63809 - if (tmp->vm_flags & VM_DENYWRITE)
63810 - atomic_dec(&inode->i_writecount);
63811 - mutex_lock(&mapping->i_mmap_mutex);
63812 - if (tmp->vm_flags & VM_SHARED)
63813 - mapping->i_mmap_writable++;
63814 - flush_dcache_mmap_lock(mapping);
63815 - /* insert tmp into the share list, just after mpnt */
63816 - vma_prio_tree_add(tmp, mpnt);
63817 - flush_dcache_mmap_unlock(mapping);
63818 - mutex_unlock(&mapping->i_mmap_mutex);
63819 - }
63820 -
63821 - /*
63822 - * Clear hugetlb-related page reserves for children. This only
63823 - * affects MAP_PRIVATE mappings. Faults generated by the child
63824 - * are not guaranteed to succeed, even if read-only
63825 - */
63826 - if (is_vm_hugetlb_page(tmp))
63827 - reset_vma_resv_huge_pages(tmp);
63828
63829 /*
63830 * Link in the new vma and copy the page table entries.
63831 @@ -418,6 +438,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63832 if (retval)
63833 goto out;
63834 }
63835 +
63836 +#ifdef CONFIG_PAX_SEGMEXEC
63837 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63838 + struct vm_area_struct *mpnt_m;
63839 +
63840 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63841 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63842 +
63843 + if (!mpnt->vm_mirror)
63844 + continue;
63845 +
63846 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63847 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63848 + mpnt->vm_mirror = mpnt_m;
63849 + } else {
63850 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63851 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63852 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63853 + mpnt->vm_mirror->vm_mirror = mpnt;
63854 + }
63855 + }
63856 + BUG_ON(mpnt_m);
63857 + }
63858 +#endif
63859 +
63860 /* a new mm has just been created */
63861 arch_dup_mmap(oldmm, mm);
63862 retval = 0;
63863 @@ -426,14 +471,6 @@ out:
63864 flush_tlb_mm(oldmm);
63865 up_write(&oldmm->mmap_sem);
63866 return retval;
63867 -fail_nomem_anon_vma_fork:
63868 - mpol_put(pol);
63869 -fail_nomem_policy:
63870 - kmem_cache_free(vm_area_cachep, tmp);
63871 -fail_nomem:
63872 - retval = -ENOMEM;
63873 - vm_unacct_memory(charge);
63874 - goto out;
63875 }
63876
63877 static inline int mm_alloc_pgd(struct mm_struct *mm)
63878 @@ -645,6 +682,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
63879 }
63880 EXPORT_SYMBOL_GPL(get_task_mm);
63881
63882 +struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
63883 +{
63884 + struct mm_struct *mm;
63885 + int err;
63886 +
63887 + err = mutex_lock_killable(&task->signal->cred_guard_mutex);
63888 + if (err)
63889 + return ERR_PTR(err);
63890 +
63891 + mm = get_task_mm(task);
63892 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
63893 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
63894 + mmput(mm);
63895 + mm = ERR_PTR(-EACCES);
63896 + }
63897 + mutex_unlock(&task->signal->cred_guard_mutex);
63898 +
63899 + return mm;
63900 +}
63901 +
63902 /* Please note the differences between mmput and mm_release.
63903 * mmput is called whenever we stop holding onto a mm_struct,
63904 * error success whatever.
63905 @@ -830,13 +887,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
63906 spin_unlock(&fs->lock);
63907 return -EAGAIN;
63908 }
63909 - fs->users++;
63910 + atomic_inc(&fs->users);
63911 spin_unlock(&fs->lock);
63912 return 0;
63913 }
63914 tsk->fs = copy_fs_struct(fs);
63915 if (!tsk->fs)
63916 return -ENOMEM;
63917 + gr_set_chroot_entries(tsk, &tsk->fs->root);
63918 return 0;
63919 }
63920
63921 @@ -1100,6 +1158,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63922 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63923 #endif
63924 retval = -EAGAIN;
63925 +
63926 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63927 +
63928 if (atomic_read(&p->real_cred->user->processes) >=
63929 task_rlimit(p, RLIMIT_NPROC)) {
63930 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63931 @@ -1259,6 +1320,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63932 if (clone_flags & CLONE_THREAD)
63933 p->tgid = current->tgid;
63934
63935 + gr_copy_label(p);
63936 +
63937 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63938 /*
63939 * Clear TID on mm_release()?
63940 @@ -1421,6 +1484,8 @@ bad_fork_cleanup_count:
63941 bad_fork_free:
63942 free_task(p);
63943 fork_out:
63944 + gr_log_forkfail(retval);
63945 +
63946 return ERR_PTR(retval);
63947 }
63948
63949 @@ -1521,6 +1586,8 @@ long do_fork(unsigned long clone_flags,
63950 if (clone_flags & CLONE_PARENT_SETTID)
63951 put_user(nr, parent_tidptr);
63952
63953 + gr_handle_brute_check();
63954 +
63955 if (clone_flags & CLONE_VFORK) {
63956 p->vfork_done = &vfork;
63957 init_completion(&vfork);
63958 @@ -1630,7 +1697,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
63959 return 0;
63960
63961 /* don't need lock here; in the worst case we'll do useless copy */
63962 - if (fs->users == 1)
63963 + if (atomic_read(&fs->users) == 1)
63964 return 0;
63965
63966 *new_fsp = copy_fs_struct(fs);
63967 @@ -1719,7 +1786,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
63968 fs = current->fs;
63969 spin_lock(&fs->lock);
63970 current->fs = new_fs;
63971 - if (--fs->users)
63972 + gr_set_chroot_entries(current, &current->fs->root);
63973 + if (atomic_dec_return(&fs->users))
63974 new_fs = NULL;
63975 else
63976 new_fs = fs;
63977 diff --git a/kernel/futex.c b/kernel/futex.c
63978 index 1614be2..37abc7e 100644
63979 --- a/kernel/futex.c
63980 +++ b/kernel/futex.c
63981 @@ -54,6 +54,7 @@
63982 #include <linux/mount.h>
63983 #include <linux/pagemap.h>
63984 #include <linux/syscalls.h>
63985 +#include <linux/ptrace.h>
63986 #include <linux/signal.h>
63987 #include <linux/export.h>
63988 #include <linux/magic.h>
63989 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
63990 struct page *page, *page_head;
63991 int err, ro = 0;
63992
63993 +#ifdef CONFIG_PAX_SEGMEXEC
63994 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63995 + return -EFAULT;
63996 +#endif
63997 +
63998 /*
63999 * The futex address must be "naturally" aligned.
64000 */
64001 @@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
64002 if (!p)
64003 goto err_unlock;
64004 ret = -EPERM;
64005 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64006 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
64007 + goto err_unlock;
64008 +#endif
64009 pcred = __task_cred(p);
64010 /* If victim is in different user_ns, then uids are not
64011 comparable, so we must have CAP_SYS_PTRACE */
64012 @@ -2724,6 +2734,7 @@ static int __init futex_init(void)
64013 {
64014 u32 curval;
64015 int i;
64016 + mm_segment_t oldfs;
64017
64018 /*
64019 * This will fail and we want it. Some arch implementations do
64020 @@ -2735,8 +2746,11 @@ static int __init futex_init(void)
64021 * implementation, the non-functional ones will return
64022 * -ENOSYS.
64023 */
64024 + oldfs = get_fs();
64025 + set_fs(USER_DS);
64026 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
64027 futex_cmpxchg_enabled = 1;
64028 + set_fs(oldfs);
64029
64030 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
64031 plist_head_init(&futex_queues[i].chain);
64032 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
64033 index 5f9e689..582d46d 100644
64034 --- a/kernel/futex_compat.c
64035 +++ b/kernel/futex_compat.c
64036 @@ -10,6 +10,7 @@
64037 #include <linux/compat.h>
64038 #include <linux/nsproxy.h>
64039 #include <linux/futex.h>
64040 +#include <linux/ptrace.h>
64041
64042 #include <asm/uaccess.h>
64043
64044 @@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
64045 {
64046 struct compat_robust_list_head __user *head;
64047 unsigned long ret;
64048 - const struct cred *cred = current_cred(), *pcred;
64049 + const struct cred *cred = current_cred();
64050 + const struct cred *pcred;
64051
64052 if (!futex_cmpxchg_enabled)
64053 return -ENOSYS;
64054 @@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
64055 if (!p)
64056 goto err_unlock;
64057 ret = -EPERM;
64058 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64059 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
64060 + goto err_unlock;
64061 +#endif
64062 pcred = __task_cred(p);
64063 /* If victim is in different user_ns, then uids are not
64064 comparable, so we must have CAP_SYS_PTRACE */
64065 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
64066 index 9b22d03..6295b62 100644
64067 --- a/kernel/gcov/base.c
64068 +++ b/kernel/gcov/base.c
64069 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
64070 }
64071
64072 #ifdef CONFIG_MODULES
64073 -static inline int within(void *addr, void *start, unsigned long size)
64074 -{
64075 - return ((addr >= start) && (addr < start + size));
64076 -}
64077 -
64078 /* Update list and generate events when modules are unloaded. */
64079 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
64080 void *data)
64081 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
64082 prev = NULL;
64083 /* Remove entries located in module from linked list. */
64084 for (info = gcov_info_head; info; info = info->next) {
64085 - if (within(info, mod->module_core, mod->core_size)) {
64086 + if (within_module_core_rw((unsigned long)info, mod)) {
64087 if (prev)
64088 prev->next = info->next;
64089 else
64090 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
64091 index ae34bf5..4e2f3d0 100644
64092 --- a/kernel/hrtimer.c
64093 +++ b/kernel/hrtimer.c
64094 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
64095 local_irq_restore(flags);
64096 }
64097
64098 -static void run_hrtimer_softirq(struct softirq_action *h)
64099 +static void run_hrtimer_softirq(void)
64100 {
64101 hrtimer_peek_ahead_timers();
64102 }
64103 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
64104 index 66ff710..05a5128 100644
64105 --- a/kernel/jump_label.c
64106 +++ b/kernel/jump_label.c
64107 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
64108
64109 size = (((unsigned long)stop - (unsigned long)start)
64110 / sizeof(struct jump_entry));
64111 + pax_open_kernel();
64112 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
64113 + pax_close_kernel();
64114 }
64115
64116 static void jump_label_update(struct jump_label_key *key, int enable);
64117 @@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
64118 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
64119 struct jump_entry *iter;
64120
64121 + pax_open_kernel();
64122 for (iter = iter_start; iter < iter_stop; iter++) {
64123 if (within_module_init(iter->code, mod))
64124 iter->code = 0;
64125 }
64126 + pax_close_kernel();
64127 }
64128
64129 static int
64130 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
64131 index 079f1d3..a407562 100644
64132 --- a/kernel/kallsyms.c
64133 +++ b/kernel/kallsyms.c
64134 @@ -11,6 +11,9 @@
64135 * Changed the compression method from stem compression to "table lookup"
64136 * compression (see scripts/kallsyms.c for a more complete description)
64137 */
64138 +#ifdef CONFIG_GRKERNSEC_HIDESYM
64139 +#define __INCLUDED_BY_HIDESYM 1
64140 +#endif
64141 #include <linux/kallsyms.h>
64142 #include <linux/module.h>
64143 #include <linux/init.h>
64144 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
64145
64146 static inline int is_kernel_inittext(unsigned long addr)
64147 {
64148 + if (system_state != SYSTEM_BOOTING)
64149 + return 0;
64150 +
64151 if (addr >= (unsigned long)_sinittext
64152 && addr <= (unsigned long)_einittext)
64153 return 1;
64154 return 0;
64155 }
64156
64157 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64158 +#ifdef CONFIG_MODULES
64159 +static inline int is_module_text(unsigned long addr)
64160 +{
64161 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
64162 + return 1;
64163 +
64164 + addr = ktla_ktva(addr);
64165 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
64166 +}
64167 +#else
64168 +static inline int is_module_text(unsigned long addr)
64169 +{
64170 + return 0;
64171 +}
64172 +#endif
64173 +#endif
64174 +
64175 static inline int is_kernel_text(unsigned long addr)
64176 {
64177 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
64178 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
64179
64180 static inline int is_kernel(unsigned long addr)
64181 {
64182 +
64183 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64184 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
64185 + return 1;
64186 +
64187 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
64188 +#else
64189 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
64190 +#endif
64191 +
64192 return 1;
64193 return in_gate_area_no_mm(addr);
64194 }
64195
64196 static int is_ksym_addr(unsigned long addr)
64197 {
64198 +
64199 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64200 + if (is_module_text(addr))
64201 + return 0;
64202 +#endif
64203 +
64204 if (all_var)
64205 return is_kernel(addr);
64206
64207 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
64208
64209 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
64210 {
64211 - iter->name[0] = '\0';
64212 iter->nameoff = get_symbol_offset(new_pos);
64213 iter->pos = new_pos;
64214 }
64215 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
64216 {
64217 struct kallsym_iter *iter = m->private;
64218
64219 +#ifdef CONFIG_GRKERNSEC_HIDESYM
64220 + if (current_uid())
64221 + return 0;
64222 +#endif
64223 +
64224 /* Some debugging symbols have no name. Ignore them. */
64225 if (!iter->name[0])
64226 return 0;
64227 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
64228 struct kallsym_iter *iter;
64229 int ret;
64230
64231 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
64232 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
64233 if (!iter)
64234 return -ENOMEM;
64235 reset_iter(iter, 0);
64236 diff --git a/kernel/kexec.c b/kernel/kexec.c
64237 index dc7bc08..4601964 100644
64238 --- a/kernel/kexec.c
64239 +++ b/kernel/kexec.c
64240 @@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
64241 unsigned long flags)
64242 {
64243 struct compat_kexec_segment in;
64244 - struct kexec_segment out, __user *ksegments;
64245 + struct kexec_segment out;
64246 + struct kexec_segment __user *ksegments;
64247 unsigned long i, result;
64248
64249 /* Don't allow clients that don't understand the native
64250 diff --git a/kernel/kmod.c b/kernel/kmod.c
64251 index a4bea97..7a1ae9a 100644
64252 --- a/kernel/kmod.c
64253 +++ b/kernel/kmod.c
64254 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
64255 * If module auto-loading support is disabled then this function
64256 * becomes a no-operation.
64257 */
64258 -int __request_module(bool wait, const char *fmt, ...)
64259 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
64260 {
64261 - va_list args;
64262 char module_name[MODULE_NAME_LEN];
64263 unsigned int max_modprobes;
64264 int ret;
64265 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
64266 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
64267 static char *envp[] = { "HOME=/",
64268 "TERM=linux",
64269 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
64270 @@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
64271 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
64272 static int kmod_loop_msg;
64273
64274 - va_start(args, fmt);
64275 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
64276 - va_end(args);
64277 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
64278 if (ret >= MODULE_NAME_LEN)
64279 return -ENAMETOOLONG;
64280
64281 @@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
64282 if (ret)
64283 return ret;
64284
64285 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64286 + if (!current_uid()) {
64287 + /* hack to workaround consolekit/udisks stupidity */
64288 + read_lock(&tasklist_lock);
64289 + if (!strcmp(current->comm, "mount") &&
64290 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
64291 + read_unlock(&tasklist_lock);
64292 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
64293 + return -EPERM;
64294 + }
64295 + read_unlock(&tasklist_lock);
64296 + }
64297 +#endif
64298 +
64299 /* If modprobe needs a service that is in a module, we get a recursive
64300 * loop. Limit the number of running kmod threads to max_threads/2 or
64301 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
64302 @@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
64303 atomic_dec(&kmod_concurrent);
64304 return ret;
64305 }
64306 +
64307 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
64308 +{
64309 + va_list args;
64310 + int ret;
64311 +
64312 + va_start(args, fmt);
64313 + ret = ____request_module(wait, module_param, fmt, args);
64314 + va_end(args);
64315 +
64316 + return ret;
64317 +}
64318 +
64319 +int __request_module(bool wait, const char *fmt, ...)
64320 +{
64321 + va_list args;
64322 + int ret;
64323 +
64324 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64325 + if (current_uid()) {
64326 + char module_param[MODULE_NAME_LEN];
64327 +
64328 + memset(module_param, 0, sizeof(module_param));
64329 +
64330 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
64331 +
64332 + va_start(args, fmt);
64333 + ret = ____request_module(wait, module_param, fmt, args);
64334 + va_end(args);
64335 +
64336 + return ret;
64337 + }
64338 +#endif
64339 +
64340 + va_start(args, fmt);
64341 + ret = ____request_module(wait, NULL, fmt, args);
64342 + va_end(args);
64343 +
64344 + return ret;
64345 +}
64346 +
64347 EXPORT_SYMBOL(__request_module);
64348 #endif /* CONFIG_MODULES */
64349
64350 @@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
64351 *
64352 * Thus the __user pointer cast is valid here.
64353 */
64354 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
64355 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
64356
64357 /*
64358 * If ret is 0, either ____call_usermodehelper failed and the
64359 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
64360 index faa39d1..d7ad37e 100644
64361 --- a/kernel/kprobes.c
64362 +++ b/kernel/kprobes.c
64363 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
64364 * kernel image and loaded module images reside. This is required
64365 * so x86_64 can correctly handle the %rip-relative fixups.
64366 */
64367 - kip->insns = module_alloc(PAGE_SIZE);
64368 + kip->insns = module_alloc_exec(PAGE_SIZE);
64369 if (!kip->insns) {
64370 kfree(kip);
64371 return NULL;
64372 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
64373 */
64374 if (!list_is_singular(&kip->list)) {
64375 list_del(&kip->list);
64376 - module_free(NULL, kip->insns);
64377 + module_free_exec(NULL, kip->insns);
64378 kfree(kip);
64379 }
64380 return 1;
64381 @@ -1953,7 +1953,7 @@ static int __init init_kprobes(void)
64382 {
64383 int i, err = 0;
64384 unsigned long offset = 0, size = 0;
64385 - char *modname, namebuf[128];
64386 + char *modname, namebuf[KSYM_NAME_LEN];
64387 const char *symbol_name;
64388 void *addr;
64389 struct kprobe_blackpoint *kb;
64390 @@ -2079,7 +2079,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
64391 const char *sym = NULL;
64392 unsigned int i = *(loff_t *) v;
64393 unsigned long offset = 0;
64394 - char *modname, namebuf[128];
64395 + char *modname, namebuf[KSYM_NAME_LEN];
64396
64397 head = &kprobe_table[i];
64398 preempt_disable();
64399 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
64400 index b2e08c9..01d8049 100644
64401 --- a/kernel/lockdep.c
64402 +++ b/kernel/lockdep.c
64403 @@ -592,6 +592,10 @@ static int static_obj(void *obj)
64404 end = (unsigned long) &_end,
64405 addr = (unsigned long) obj;
64406
64407 +#ifdef CONFIG_PAX_KERNEXEC
64408 + start = ktla_ktva(start);
64409 +#endif
64410 +
64411 /*
64412 * static variable?
64413 */
64414 @@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
64415 if (!static_obj(lock->key)) {
64416 debug_locks_off();
64417 printk("INFO: trying to register non-static key.\n");
64418 + printk("lock:%pS key:%pS.\n", lock, lock->key);
64419 printk("the code is fine but needs lockdep annotation.\n");
64420 printk("turning off the locking correctness validator.\n");
64421 dump_stack();
64422 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
64423 if (!class)
64424 return 0;
64425 }
64426 - atomic_inc((atomic_t *)&class->ops);
64427 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
64428 if (very_verbose(class)) {
64429 printk("\nacquire class [%p] %s", class->key, class->name);
64430 if (class->name_version > 1)
64431 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
64432 index 91c32a0..b2c71c5 100644
64433 --- a/kernel/lockdep_proc.c
64434 +++ b/kernel/lockdep_proc.c
64435 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
64436
64437 static void print_name(struct seq_file *m, struct lock_class *class)
64438 {
64439 - char str[128];
64440 + char str[KSYM_NAME_LEN];
64441 const char *name = class->name;
64442
64443 if (!name) {
64444 diff --git a/kernel/module.c b/kernel/module.c
64445 index 178333c..04e3408 100644
64446 --- a/kernel/module.c
64447 +++ b/kernel/module.c
64448 @@ -58,6 +58,7 @@
64449 #include <linux/jump_label.h>
64450 #include <linux/pfn.h>
64451 #include <linux/bsearch.h>
64452 +#include <linux/grsecurity.h>
64453
64454 #define CREATE_TRACE_POINTS
64455 #include <trace/events/module.h>
64456 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
64457
64458 /* Bounds of module allocation, for speeding __module_address.
64459 * Protected by module_mutex. */
64460 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
64461 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
64462 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
64463
64464 int register_module_notifier(struct notifier_block * nb)
64465 {
64466 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64467 return true;
64468
64469 list_for_each_entry_rcu(mod, &modules, list) {
64470 - struct symsearch arr[] = {
64471 + struct symsearch modarr[] = {
64472 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
64473 NOT_GPL_ONLY, false },
64474 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
64475 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64476 #endif
64477 };
64478
64479 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
64480 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
64481 return true;
64482 }
64483 return false;
64484 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
64485 static int percpu_modalloc(struct module *mod,
64486 unsigned long size, unsigned long align)
64487 {
64488 - if (align > PAGE_SIZE) {
64489 + if (align-1 >= PAGE_SIZE) {
64490 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
64491 mod->name, align, PAGE_SIZE);
64492 align = PAGE_SIZE;
64493 @@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
64494 */
64495 #ifdef CONFIG_SYSFS
64496
64497 -#ifdef CONFIG_KALLSYMS
64498 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64499 static inline bool sect_empty(const Elf_Shdr *sect)
64500 {
64501 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
64502 @@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
64503
64504 static void unset_module_core_ro_nx(struct module *mod)
64505 {
64506 - set_page_attributes(mod->module_core + mod->core_text_size,
64507 - mod->module_core + mod->core_size,
64508 + set_page_attributes(mod->module_core_rw,
64509 + mod->module_core_rw + mod->core_size_rw,
64510 set_memory_x);
64511 - set_page_attributes(mod->module_core,
64512 - mod->module_core + mod->core_ro_size,
64513 + set_page_attributes(mod->module_core_rx,
64514 + mod->module_core_rx + mod->core_size_rx,
64515 set_memory_rw);
64516 }
64517
64518 static void unset_module_init_ro_nx(struct module *mod)
64519 {
64520 - set_page_attributes(mod->module_init + mod->init_text_size,
64521 - mod->module_init + mod->init_size,
64522 + set_page_attributes(mod->module_init_rw,
64523 + mod->module_init_rw + mod->init_size_rw,
64524 set_memory_x);
64525 - set_page_attributes(mod->module_init,
64526 - mod->module_init + mod->init_ro_size,
64527 + set_page_attributes(mod->module_init_rx,
64528 + mod->module_init_rx + mod->init_size_rx,
64529 set_memory_rw);
64530 }
64531
64532 @@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
64533
64534 mutex_lock(&module_mutex);
64535 list_for_each_entry_rcu(mod, &modules, list) {
64536 - if ((mod->module_core) && (mod->core_text_size)) {
64537 - set_page_attributes(mod->module_core,
64538 - mod->module_core + mod->core_text_size,
64539 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64540 + set_page_attributes(mod->module_core_rx,
64541 + mod->module_core_rx + mod->core_size_rx,
64542 set_memory_rw);
64543 }
64544 - if ((mod->module_init) && (mod->init_text_size)) {
64545 - set_page_attributes(mod->module_init,
64546 - mod->module_init + mod->init_text_size,
64547 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64548 + set_page_attributes(mod->module_init_rx,
64549 + mod->module_init_rx + mod->init_size_rx,
64550 set_memory_rw);
64551 }
64552 }
64553 @@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
64554
64555 mutex_lock(&module_mutex);
64556 list_for_each_entry_rcu(mod, &modules, list) {
64557 - if ((mod->module_core) && (mod->core_text_size)) {
64558 - set_page_attributes(mod->module_core,
64559 - mod->module_core + mod->core_text_size,
64560 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64561 + set_page_attributes(mod->module_core_rx,
64562 + mod->module_core_rx + mod->core_size_rx,
64563 set_memory_ro);
64564 }
64565 - if ((mod->module_init) && (mod->init_text_size)) {
64566 - set_page_attributes(mod->module_init,
64567 - mod->module_init + mod->init_text_size,
64568 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64569 + set_page_attributes(mod->module_init_rx,
64570 + mod->module_init_rx + mod->init_size_rx,
64571 set_memory_ro);
64572 }
64573 }
64574 @@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
64575
64576 /* This may be NULL, but that's OK */
64577 unset_module_init_ro_nx(mod);
64578 - module_free(mod, mod->module_init);
64579 + module_free(mod, mod->module_init_rw);
64580 + module_free_exec(mod, mod->module_init_rx);
64581 kfree(mod->args);
64582 percpu_modfree(mod);
64583
64584 /* Free lock-classes: */
64585 - lockdep_free_key_range(mod->module_core, mod->core_size);
64586 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64587 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64588
64589 /* Finally, free the core (containing the module structure) */
64590 unset_module_core_ro_nx(mod);
64591 - module_free(mod, mod->module_core);
64592 + module_free_exec(mod, mod->module_core_rx);
64593 + module_free(mod, mod->module_core_rw);
64594
64595 #ifdef CONFIG_MPU
64596 update_protections(current->mm);
64597 @@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64598 unsigned int i;
64599 int ret = 0;
64600 const struct kernel_symbol *ksym;
64601 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64602 + int is_fs_load = 0;
64603 + int register_filesystem_found = 0;
64604 + char *p;
64605 +
64606 + p = strstr(mod->args, "grsec_modharden_fs");
64607 + if (p) {
64608 + char *endptr = p + strlen("grsec_modharden_fs");
64609 + /* copy \0 as well */
64610 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64611 + is_fs_load = 1;
64612 + }
64613 +#endif
64614
64615 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
64616 const char *name = info->strtab + sym[i].st_name;
64617
64618 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64619 + /* it's a real shame this will never get ripped and copied
64620 + upstream! ;(
64621 + */
64622 + if (is_fs_load && !strcmp(name, "register_filesystem"))
64623 + register_filesystem_found = 1;
64624 +#endif
64625 +
64626 switch (sym[i].st_shndx) {
64627 case SHN_COMMON:
64628 /* We compiled with -fno-common. These are not
64629 @@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64630 ksym = resolve_symbol_wait(mod, info, name);
64631 /* Ok if resolved. */
64632 if (ksym && !IS_ERR(ksym)) {
64633 + pax_open_kernel();
64634 sym[i].st_value = ksym->value;
64635 + pax_close_kernel();
64636 break;
64637 }
64638
64639 @@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64640 secbase = (unsigned long)mod_percpu(mod);
64641 else
64642 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
64643 + pax_open_kernel();
64644 sym[i].st_value += secbase;
64645 + pax_close_kernel();
64646 break;
64647 }
64648 }
64649
64650 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64651 + if (is_fs_load && !register_filesystem_found) {
64652 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64653 + ret = -EPERM;
64654 + }
64655 +#endif
64656 +
64657 return ret;
64658 }
64659
64660 @@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
64661 || s->sh_entsize != ~0UL
64662 || strstarts(sname, ".init"))
64663 continue;
64664 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64665 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64666 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64667 + else
64668 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64669 DEBUGP("\t%s\n", name);
64670 }
64671 - switch (m) {
64672 - case 0: /* executable */
64673 - mod->core_size = debug_align(mod->core_size);
64674 - mod->core_text_size = mod->core_size;
64675 - break;
64676 - case 1: /* RO: text and ro-data */
64677 - mod->core_size = debug_align(mod->core_size);
64678 - mod->core_ro_size = mod->core_size;
64679 - break;
64680 - case 3: /* whole core */
64681 - mod->core_size = debug_align(mod->core_size);
64682 - break;
64683 - }
64684 }
64685
64686 DEBUGP("Init section allocation order:\n");
64687 @@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
64688 || s->sh_entsize != ~0UL
64689 || !strstarts(sname, ".init"))
64690 continue;
64691 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64692 - | INIT_OFFSET_MASK);
64693 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64694 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64695 + else
64696 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64697 + s->sh_entsize |= INIT_OFFSET_MASK;
64698 DEBUGP("\t%s\n", sname);
64699 }
64700 - switch (m) {
64701 - case 0: /* executable */
64702 - mod->init_size = debug_align(mod->init_size);
64703 - mod->init_text_size = mod->init_size;
64704 - break;
64705 - case 1: /* RO: text and ro-data */
64706 - mod->init_size = debug_align(mod->init_size);
64707 - mod->init_ro_size = mod->init_size;
64708 - break;
64709 - case 3: /* whole init */
64710 - mod->init_size = debug_align(mod->init_size);
64711 - break;
64712 - }
64713 }
64714 }
64715
64716 @@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64717
64718 /* Put symbol section at end of init part of module. */
64719 symsect->sh_flags |= SHF_ALLOC;
64720 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64721 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64722 info->index.sym) | INIT_OFFSET_MASK;
64723 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64724
64725 @@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64726 }
64727
64728 /* Append room for core symbols at end of core part. */
64729 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64730 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64731 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64732 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64733
64734 /* Put string table section at end of init part of module. */
64735 strsect->sh_flags |= SHF_ALLOC;
64736 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64737 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64738 info->index.str) | INIT_OFFSET_MASK;
64739 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64740
64741 /* Append room for core symbols' strings at end of core part. */
64742 - info->stroffs = mod->core_size;
64743 + info->stroffs = mod->core_size_rx;
64744 __set_bit(0, info->strmap);
64745 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64746 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64747 }
64748
64749 static void add_kallsyms(struct module *mod, const struct load_info *info)
64750 @@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64751 /* Make sure we get permanent strtab: don't use info->strtab. */
64752 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64753
64754 + pax_open_kernel();
64755 +
64756 /* Set types up while we still have access to sections. */
64757 for (i = 0; i < mod->num_symtab; i++)
64758 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64759
64760 - mod->core_symtab = dst = mod->module_core + info->symoffs;
64761 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64762 src = mod->symtab;
64763 *dst = *src;
64764 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64765 @@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64766 }
64767 mod->core_num_syms = ndst;
64768
64769 - mod->core_strtab = s = mod->module_core + info->stroffs;
64770 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64771 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64772 if (test_bit(i, info->strmap))
64773 *++s = mod->strtab[i];
64774 +
64775 + pax_close_kernel();
64776 }
64777 #else
64778 static inline void layout_symtab(struct module *mod, struct load_info *info)
64779 @@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
64780 return size == 0 ? NULL : vmalloc_exec(size);
64781 }
64782
64783 -static void *module_alloc_update_bounds(unsigned long size)
64784 +static void *module_alloc_update_bounds_rw(unsigned long size)
64785 {
64786 void *ret = module_alloc(size);
64787
64788 if (ret) {
64789 mutex_lock(&module_mutex);
64790 /* Update module bounds. */
64791 - if ((unsigned long)ret < module_addr_min)
64792 - module_addr_min = (unsigned long)ret;
64793 - if ((unsigned long)ret + size > module_addr_max)
64794 - module_addr_max = (unsigned long)ret + size;
64795 + if ((unsigned long)ret < module_addr_min_rw)
64796 + module_addr_min_rw = (unsigned long)ret;
64797 + if ((unsigned long)ret + size > module_addr_max_rw)
64798 + module_addr_max_rw = (unsigned long)ret + size;
64799 + mutex_unlock(&module_mutex);
64800 + }
64801 + return ret;
64802 +}
64803 +
64804 +static void *module_alloc_update_bounds_rx(unsigned long size)
64805 +{
64806 + void *ret = module_alloc_exec(size);
64807 +
64808 + if (ret) {
64809 + mutex_lock(&module_mutex);
64810 + /* Update module bounds. */
64811 + if ((unsigned long)ret < module_addr_min_rx)
64812 + module_addr_min_rx = (unsigned long)ret;
64813 + if ((unsigned long)ret + size > module_addr_max_rx)
64814 + module_addr_max_rx = (unsigned long)ret + size;
64815 mutex_unlock(&module_mutex);
64816 }
64817 return ret;
64818 @@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
64819 static int check_modinfo(struct module *mod, struct load_info *info)
64820 {
64821 const char *modmagic = get_modinfo(info, "vermagic");
64822 + const char *license = get_modinfo(info, "license");
64823 int err;
64824
64825 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
64826 + if (!license || !license_is_gpl_compatible(license))
64827 + return -ENOEXEC;
64828 +#endif
64829 +
64830 /* This is allowed: modprobe --force will invalidate it. */
64831 if (!modmagic) {
64832 err = try_to_force_load(mod, "bad vermagic");
64833 @@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
64834 }
64835
64836 /* Set up license info based on the info section */
64837 - set_license(mod, get_modinfo(info, "license"));
64838 + set_license(mod, license);
64839
64840 return 0;
64841 }
64842 @@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info)
64843 void *ptr;
64844
64845 /* Do the allocs. */
64846 - ptr = module_alloc_update_bounds(mod->core_size);
64847 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64848 /*
64849 * The pointer to this block is stored in the module structure
64850 * which is inside the block. Just mark it as not being a
64851 @@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info)
64852 if (!ptr)
64853 return -ENOMEM;
64854
64855 - memset(ptr, 0, mod->core_size);
64856 - mod->module_core = ptr;
64857 + memset(ptr, 0, mod->core_size_rw);
64858 + mod->module_core_rw = ptr;
64859
64860 - ptr = module_alloc_update_bounds(mod->init_size);
64861 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64862 /*
64863 * The pointer to this block is stored in the module structure
64864 * which is inside the block. This block doesn't need to be
64865 * scanned as it contains data and code that will be freed
64866 * after the module is initialized.
64867 */
64868 - kmemleak_ignore(ptr);
64869 - if (!ptr && mod->init_size) {
64870 - module_free(mod, mod->module_core);
64871 + kmemleak_not_leak(ptr);
64872 + if (!ptr && mod->init_size_rw) {
64873 + module_free(mod, mod->module_core_rw);
64874 return -ENOMEM;
64875 }
64876 - memset(ptr, 0, mod->init_size);
64877 - mod->module_init = ptr;
64878 + memset(ptr, 0, mod->init_size_rw);
64879 + mod->module_init_rw = ptr;
64880 +
64881 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64882 + kmemleak_not_leak(ptr);
64883 + if (!ptr) {
64884 + module_free(mod, mod->module_init_rw);
64885 + module_free(mod, mod->module_core_rw);
64886 + return -ENOMEM;
64887 + }
64888 +
64889 + pax_open_kernel();
64890 + memset(ptr, 0, mod->core_size_rx);
64891 + pax_close_kernel();
64892 + mod->module_core_rx = ptr;
64893 +
64894 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64895 + kmemleak_not_leak(ptr);
64896 + if (!ptr && mod->init_size_rx) {
64897 + module_free_exec(mod, mod->module_core_rx);
64898 + module_free(mod, mod->module_init_rw);
64899 + module_free(mod, mod->module_core_rw);
64900 + return -ENOMEM;
64901 + }
64902 +
64903 + pax_open_kernel();
64904 + memset(ptr, 0, mod->init_size_rx);
64905 + pax_close_kernel();
64906 + mod->module_init_rx = ptr;
64907
64908 /* Transfer each section which specifies SHF_ALLOC */
64909 DEBUGP("final section addresses:\n");
64910 @@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info)
64911 if (!(shdr->sh_flags & SHF_ALLOC))
64912 continue;
64913
64914 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
64915 - dest = mod->module_init
64916 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64917 - else
64918 - dest = mod->module_core + shdr->sh_entsize;
64919 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64920 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64921 + dest = mod->module_init_rw
64922 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64923 + else
64924 + dest = mod->module_init_rx
64925 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64926 + } else {
64927 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64928 + dest = mod->module_core_rw + shdr->sh_entsize;
64929 + else
64930 + dest = mod->module_core_rx + shdr->sh_entsize;
64931 + }
64932 +
64933 + if (shdr->sh_type != SHT_NOBITS) {
64934 +
64935 +#ifdef CONFIG_PAX_KERNEXEC
64936 +#ifdef CONFIG_X86_64
64937 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64938 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64939 +#endif
64940 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64941 + pax_open_kernel();
64942 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64943 + pax_close_kernel();
64944 + } else
64945 +#endif
64946
64947 - if (shdr->sh_type != SHT_NOBITS)
64948 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64949 + }
64950 /* Update sh_addr to point to copy in image. */
64951 - shdr->sh_addr = (unsigned long)dest;
64952 +
64953 +#ifdef CONFIG_PAX_KERNEXEC
64954 + if (shdr->sh_flags & SHF_EXECINSTR)
64955 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
64956 + else
64957 +#endif
64958 +
64959 + shdr->sh_addr = (unsigned long)dest;
64960 DEBUGP("\t0x%lx %s\n",
64961 shdr->sh_addr, info->secstrings + shdr->sh_name);
64962 }
64963 @@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod)
64964 * Do it before processing of module parameters, so the module
64965 * can provide parameter accessor functions of its own.
64966 */
64967 - if (mod->module_init)
64968 - flush_icache_range((unsigned long)mod->module_init,
64969 - (unsigned long)mod->module_init
64970 - + mod->init_size);
64971 - flush_icache_range((unsigned long)mod->module_core,
64972 - (unsigned long)mod->module_core + mod->core_size);
64973 + if (mod->module_init_rx)
64974 + flush_icache_range((unsigned long)mod->module_init_rx,
64975 + (unsigned long)mod->module_init_rx
64976 + + mod->init_size_rx);
64977 + flush_icache_range((unsigned long)mod->module_core_rx,
64978 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
64979
64980 set_fs(old_fs);
64981 }
64982 @@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
64983 {
64984 kfree(info->strmap);
64985 percpu_modfree(mod);
64986 - module_free(mod, mod->module_init);
64987 - module_free(mod, mod->module_core);
64988 + module_free_exec(mod, mod->module_init_rx);
64989 + module_free_exec(mod, mod->module_core_rx);
64990 + module_free(mod, mod->module_init_rw);
64991 + module_free(mod, mod->module_core_rw);
64992 }
64993
64994 int __weak module_finalize(const Elf_Ehdr *hdr,
64995 @@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod,
64996 if (err)
64997 goto free_unload;
64998
64999 + /* Now copy in args */
65000 + mod->args = strndup_user(uargs, ~0UL >> 1);
65001 + if (IS_ERR(mod->args)) {
65002 + err = PTR_ERR(mod->args);
65003 + goto free_unload;
65004 + }
65005 +
65006 /* Set up MODINFO_ATTR fields */
65007 setup_modinfo(mod, &info);
65008
65009 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65010 + {
65011 + char *p, *p2;
65012 +
65013 + if (strstr(mod->args, "grsec_modharden_netdev")) {
65014 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
65015 + err = -EPERM;
65016 + goto free_modinfo;
65017 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
65018 + p += strlen("grsec_modharden_normal");
65019 + p2 = strstr(p, "_");
65020 + if (p2) {
65021 + *p2 = '\0';
65022 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
65023 + *p2 = '_';
65024 + }
65025 + err = -EPERM;
65026 + goto free_modinfo;
65027 + }
65028 + }
65029 +#endif
65030 +
65031 /* Fix up syms, so that st_value is a pointer to location. */
65032 err = simplify_symbols(mod, &info);
65033 if (err < 0)
65034 @@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod,
65035
65036 flush_module_icache(mod);
65037
65038 - /* Now copy in args */
65039 - mod->args = strndup_user(uargs, ~0UL >> 1);
65040 - if (IS_ERR(mod->args)) {
65041 - err = PTR_ERR(mod->args);
65042 - goto free_arch_cleanup;
65043 - }
65044 -
65045 /* Mark state as coming so strong_try_module_get() ignores us. */
65046 mod->state = MODULE_STATE_COMING;
65047
65048 @@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod,
65049 unlock:
65050 mutex_unlock(&module_mutex);
65051 synchronize_sched();
65052 - kfree(mod->args);
65053 - free_arch_cleanup:
65054 module_arch_cleanup(mod);
65055 free_modinfo:
65056 free_modinfo(mod);
65057 + kfree(mod->args);
65058 free_unload:
65059 module_unload_free(mod);
65060 free_module:
65061 @@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
65062 MODULE_STATE_COMING, mod);
65063
65064 /* Set RO and NX regions for core */
65065 - set_section_ro_nx(mod->module_core,
65066 - mod->core_text_size,
65067 - mod->core_ro_size,
65068 - mod->core_size);
65069 + set_section_ro_nx(mod->module_core_rx,
65070 + mod->core_size_rx,
65071 + mod->core_size_rx,
65072 + mod->core_size_rx);
65073
65074 /* Set RO and NX regions for init */
65075 - set_section_ro_nx(mod->module_init,
65076 - mod->init_text_size,
65077 - mod->init_ro_size,
65078 - mod->init_size);
65079 + set_section_ro_nx(mod->module_init_rx,
65080 + mod->init_size_rx,
65081 + mod->init_size_rx,
65082 + mod->init_size_rx);
65083
65084 do_mod_ctors(mod);
65085 /* Start the module */
65086 @@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
65087 mod->strtab = mod->core_strtab;
65088 #endif
65089 unset_module_init_ro_nx(mod);
65090 - module_free(mod, mod->module_init);
65091 - mod->module_init = NULL;
65092 - mod->init_size = 0;
65093 - mod->init_ro_size = 0;
65094 - mod->init_text_size = 0;
65095 + module_free(mod, mod->module_init_rw);
65096 + module_free_exec(mod, mod->module_init_rx);
65097 + mod->module_init_rw = NULL;
65098 + mod->module_init_rx = NULL;
65099 + mod->init_size_rw = 0;
65100 + mod->init_size_rx = 0;
65101 mutex_unlock(&module_mutex);
65102
65103 return 0;
65104 @@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod,
65105 unsigned long nextval;
65106
65107 /* At worse, next value is at end of module */
65108 - if (within_module_init(addr, mod))
65109 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
65110 + if (within_module_init_rx(addr, mod))
65111 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
65112 + else if (within_module_init_rw(addr, mod))
65113 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
65114 + else if (within_module_core_rx(addr, mod))
65115 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
65116 + else if (within_module_core_rw(addr, mod))
65117 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
65118 else
65119 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
65120 + return NULL;
65121
65122 /* Scan for closest preceding symbol, and next symbol. (ELF
65123 starts real symbols at 1). */
65124 @@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p)
65125 char buf[8];
65126
65127 seq_printf(m, "%s %u",
65128 - mod->name, mod->init_size + mod->core_size);
65129 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
65130 print_unload_info(m, mod);
65131
65132 /* Informative for users. */
65133 @@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p)
65134 mod->state == MODULE_STATE_COMING ? "Loading":
65135 "Live");
65136 /* Used by oprofile and other similar tools. */
65137 - seq_printf(m, " 0x%pK", mod->module_core);
65138 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
65139
65140 /* Taints info */
65141 if (mod->taints)
65142 @@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = {
65143
65144 static int __init proc_modules_init(void)
65145 {
65146 +#ifndef CONFIG_GRKERNSEC_HIDESYM
65147 +#ifdef CONFIG_GRKERNSEC_PROC_USER
65148 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
65149 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65150 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
65151 +#else
65152 proc_create("modules", 0, NULL, &proc_modules_operations);
65153 +#endif
65154 +#else
65155 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
65156 +#endif
65157 return 0;
65158 }
65159 module_init(proc_modules_init);
65160 @@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr)
65161 {
65162 struct module *mod;
65163
65164 - if (addr < module_addr_min || addr > module_addr_max)
65165 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
65166 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
65167 return NULL;
65168
65169 list_for_each_entry_rcu(mod, &modules, list)
65170 - if (within_module_core(addr, mod)
65171 - || within_module_init(addr, mod))
65172 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
65173 return mod;
65174 return NULL;
65175 }
65176 @@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr)
65177 */
65178 struct module *__module_text_address(unsigned long addr)
65179 {
65180 - struct module *mod = __module_address(addr);
65181 + struct module *mod;
65182 +
65183 +#ifdef CONFIG_X86_32
65184 + addr = ktla_ktva(addr);
65185 +#endif
65186 +
65187 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
65188 + return NULL;
65189 +
65190 + mod = __module_address(addr);
65191 +
65192 if (mod) {
65193 /* Make sure it's within the text section. */
65194 - if (!within(addr, mod->module_init, mod->init_text_size)
65195 - && !within(addr, mod->module_core, mod->core_text_size))
65196 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
65197 mod = NULL;
65198 }
65199 return mod;
65200 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
65201 index 7e3443f..b2a1e6b 100644
65202 --- a/kernel/mutex-debug.c
65203 +++ b/kernel/mutex-debug.c
65204 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
65205 }
65206
65207 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65208 - struct thread_info *ti)
65209 + struct task_struct *task)
65210 {
65211 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
65212
65213 /* Mark the current thread as blocked on the lock: */
65214 - ti->task->blocked_on = waiter;
65215 + task->blocked_on = waiter;
65216 }
65217
65218 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65219 - struct thread_info *ti)
65220 + struct task_struct *task)
65221 {
65222 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
65223 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
65224 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
65225 - ti->task->blocked_on = NULL;
65226 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
65227 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
65228 + task->blocked_on = NULL;
65229
65230 list_del_init(&waiter->list);
65231 waiter->task = NULL;
65232 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
65233 index 0799fd3..d06ae3b 100644
65234 --- a/kernel/mutex-debug.h
65235 +++ b/kernel/mutex-debug.h
65236 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
65237 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
65238 extern void debug_mutex_add_waiter(struct mutex *lock,
65239 struct mutex_waiter *waiter,
65240 - struct thread_info *ti);
65241 + struct task_struct *task);
65242 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65243 - struct thread_info *ti);
65244 + struct task_struct *task);
65245 extern void debug_mutex_unlock(struct mutex *lock);
65246 extern void debug_mutex_init(struct mutex *lock, const char *name,
65247 struct lock_class_key *key);
65248 diff --git a/kernel/mutex.c b/kernel/mutex.c
65249 index 89096dd..f91ebc5 100644
65250 --- a/kernel/mutex.c
65251 +++ b/kernel/mutex.c
65252 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
65253 spin_lock_mutex(&lock->wait_lock, flags);
65254
65255 debug_mutex_lock_common(lock, &waiter);
65256 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
65257 + debug_mutex_add_waiter(lock, &waiter, task);
65258
65259 /* add waiting tasks to the end of the waitqueue (FIFO): */
65260 list_add_tail(&waiter.list, &lock->wait_list);
65261 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
65262 * TASK_UNINTERRUPTIBLE case.)
65263 */
65264 if (unlikely(signal_pending_state(state, task))) {
65265 - mutex_remove_waiter(lock, &waiter,
65266 - task_thread_info(task));
65267 + mutex_remove_waiter(lock, &waiter, task);
65268 mutex_release(&lock->dep_map, 1, ip);
65269 spin_unlock_mutex(&lock->wait_lock, flags);
65270
65271 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
65272 done:
65273 lock_acquired(&lock->dep_map, ip);
65274 /* got the lock - rejoice! */
65275 - mutex_remove_waiter(lock, &waiter, current_thread_info());
65276 + mutex_remove_waiter(lock, &waiter, task);
65277 mutex_set_owner(lock);
65278
65279 /* set it to 0 if there are no waiters left: */
65280 diff --git a/kernel/padata.c b/kernel/padata.c
65281 index b452599..5d68f4e 100644
65282 --- a/kernel/padata.c
65283 +++ b/kernel/padata.c
65284 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
65285 padata->pd = pd;
65286 padata->cb_cpu = cb_cpu;
65287
65288 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
65289 - atomic_set(&pd->seq_nr, -1);
65290 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
65291 + atomic_set_unchecked(&pd->seq_nr, -1);
65292
65293 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
65294 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
65295
65296 target_cpu = padata_cpu_hash(padata);
65297 queue = per_cpu_ptr(pd->pqueue, target_cpu);
65298 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
65299 padata_init_pqueues(pd);
65300 padata_init_squeues(pd);
65301 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
65302 - atomic_set(&pd->seq_nr, -1);
65303 + atomic_set_unchecked(&pd->seq_nr, -1);
65304 atomic_set(&pd->reorder_objects, 0);
65305 atomic_set(&pd->refcnt, 0);
65306 pd->pinst = pinst;
65307 diff --git a/kernel/panic.c b/kernel/panic.c
65308 index 3458469..342c500 100644
65309 --- a/kernel/panic.c
65310 +++ b/kernel/panic.c
65311 @@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
65312 va_end(args);
65313 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
65314 #ifdef CONFIG_DEBUG_BUGVERBOSE
65315 - dump_stack();
65316 + /*
65317 + * Avoid nested stack-dumping if a panic occurs during oops processing
65318 + */
65319 + if (!oops_in_progress)
65320 + dump_stack();
65321 #endif
65322
65323 /*
65324 @@ -382,7 +386,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
65325 const char *board;
65326
65327 printk(KERN_WARNING "------------[ cut here ]------------\n");
65328 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
65329 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
65330 board = dmi_get_system_info(DMI_PRODUCT_NAME);
65331 if (board)
65332 printk(KERN_WARNING "Hardware name: %s\n", board);
65333 @@ -437,7 +441,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
65334 */
65335 void __stack_chk_fail(void)
65336 {
65337 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
65338 + dump_stack();
65339 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
65340 __builtin_return_address(0));
65341 }
65342 EXPORT_SYMBOL(__stack_chk_fail);
65343 diff --git a/kernel/pid.c b/kernel/pid.c
65344 index fa5f722..0c93e57 100644
65345 --- a/kernel/pid.c
65346 +++ b/kernel/pid.c
65347 @@ -33,6 +33,7 @@
65348 #include <linux/rculist.h>
65349 #include <linux/bootmem.h>
65350 #include <linux/hash.h>
65351 +#include <linux/security.h>
65352 #include <linux/pid_namespace.h>
65353 #include <linux/init_task.h>
65354 #include <linux/syscalls.h>
65355 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
65356
65357 int pid_max = PID_MAX_DEFAULT;
65358
65359 -#define RESERVED_PIDS 300
65360 +#define RESERVED_PIDS 500
65361
65362 int pid_max_min = RESERVED_PIDS + 1;
65363 int pid_max_max = PID_MAX_LIMIT;
65364 @@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
65365 */
65366 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
65367 {
65368 + struct task_struct *task;
65369 +
65370 rcu_lockdep_assert(rcu_read_lock_held(),
65371 "find_task_by_pid_ns() needs rcu_read_lock()"
65372 " protection");
65373 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65374 +
65375 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65376 +
65377 + if (gr_pid_is_chrooted(task))
65378 + return NULL;
65379 +
65380 + return task;
65381 }
65382
65383 struct task_struct *find_task_by_vpid(pid_t vnr)
65384 @@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
65385 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
65386 }
65387
65388 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
65389 +{
65390 + rcu_lockdep_assert(rcu_read_lock_held(),
65391 + "find_task_by_pid_ns() needs rcu_read_lock()"
65392 + " protection");
65393 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
65394 +}
65395 +
65396 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
65397 {
65398 struct pid *pid;
65399 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
65400 index e7cb76d..75eceb3 100644
65401 --- a/kernel/posix-cpu-timers.c
65402 +++ b/kernel/posix-cpu-timers.c
65403 @@ -6,6 +6,7 @@
65404 #include <linux/posix-timers.h>
65405 #include <linux/errno.h>
65406 #include <linux/math64.h>
65407 +#include <linux/security.h>
65408 #include <asm/uaccess.h>
65409 #include <linux/kernel_stat.h>
65410 #include <trace/events/timer.h>
65411 @@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
65412
65413 static __init int init_posix_cpu_timers(void)
65414 {
65415 - struct k_clock process = {
65416 + static struct k_clock process = {
65417 .clock_getres = process_cpu_clock_getres,
65418 .clock_get = process_cpu_clock_get,
65419 .timer_create = process_cpu_timer_create,
65420 .nsleep = process_cpu_nsleep,
65421 .nsleep_restart = process_cpu_nsleep_restart,
65422 };
65423 - struct k_clock thread = {
65424 + static struct k_clock thread = {
65425 .clock_getres = thread_cpu_clock_getres,
65426 .clock_get = thread_cpu_clock_get,
65427 .timer_create = thread_cpu_timer_create,
65428 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
65429 index 69185ae..cc2847a 100644
65430 --- a/kernel/posix-timers.c
65431 +++ b/kernel/posix-timers.c
65432 @@ -43,6 +43,7 @@
65433 #include <linux/idr.h>
65434 #include <linux/posix-clock.h>
65435 #include <linux/posix-timers.h>
65436 +#include <linux/grsecurity.h>
65437 #include <linux/syscalls.h>
65438 #include <linux/wait.h>
65439 #include <linux/workqueue.h>
65440 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
65441 * which we beg off on and pass to do_sys_settimeofday().
65442 */
65443
65444 -static struct k_clock posix_clocks[MAX_CLOCKS];
65445 +static struct k_clock *posix_clocks[MAX_CLOCKS];
65446
65447 /*
65448 * These ones are defined below.
65449 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
65450 */
65451 static __init int init_posix_timers(void)
65452 {
65453 - struct k_clock clock_realtime = {
65454 + static struct k_clock clock_realtime = {
65455 .clock_getres = hrtimer_get_res,
65456 .clock_get = posix_clock_realtime_get,
65457 .clock_set = posix_clock_realtime_set,
65458 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
65459 .timer_get = common_timer_get,
65460 .timer_del = common_timer_del,
65461 };
65462 - struct k_clock clock_monotonic = {
65463 + static struct k_clock clock_monotonic = {
65464 .clock_getres = hrtimer_get_res,
65465 .clock_get = posix_ktime_get_ts,
65466 .nsleep = common_nsleep,
65467 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
65468 .timer_get = common_timer_get,
65469 .timer_del = common_timer_del,
65470 };
65471 - struct k_clock clock_monotonic_raw = {
65472 + static struct k_clock clock_monotonic_raw = {
65473 .clock_getres = hrtimer_get_res,
65474 .clock_get = posix_get_monotonic_raw,
65475 };
65476 - struct k_clock clock_realtime_coarse = {
65477 + static struct k_clock clock_realtime_coarse = {
65478 .clock_getres = posix_get_coarse_res,
65479 .clock_get = posix_get_realtime_coarse,
65480 };
65481 - struct k_clock clock_monotonic_coarse = {
65482 + static struct k_clock clock_monotonic_coarse = {
65483 .clock_getres = posix_get_coarse_res,
65484 .clock_get = posix_get_monotonic_coarse,
65485 };
65486 - struct k_clock clock_boottime = {
65487 + static struct k_clock clock_boottime = {
65488 .clock_getres = hrtimer_get_res,
65489 .clock_get = posix_get_boottime,
65490 .nsleep = common_nsleep,
65491 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
65492 return;
65493 }
65494
65495 - posix_clocks[clock_id] = *new_clock;
65496 + posix_clocks[clock_id] = new_clock;
65497 }
65498 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
65499
65500 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
65501 return (id & CLOCKFD_MASK) == CLOCKFD ?
65502 &clock_posix_dynamic : &clock_posix_cpu;
65503
65504 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
65505 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
65506 return NULL;
65507 - return &posix_clocks[id];
65508 + return posix_clocks[id];
65509 }
65510
65511 static int common_timer_create(struct k_itimer *new_timer)
65512 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
65513 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
65514 return -EFAULT;
65515
65516 + /* only the CLOCK_REALTIME clock can be set, all other clocks
65517 + have their clock_set fptr set to a nosettime dummy function
65518 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
65519 + call common_clock_set, which calls do_sys_settimeofday, which
65520 + we hook
65521 + */
65522 +
65523 return kc->clock_set(which_clock, &new_tp);
65524 }
65525
65526 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
65527 index d523593..68197a4 100644
65528 --- a/kernel/power/poweroff.c
65529 +++ b/kernel/power/poweroff.c
65530 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
65531 .enable_mask = SYSRQ_ENABLE_BOOT,
65532 };
65533
65534 -static int pm_sysrq_init(void)
65535 +static int __init pm_sysrq_init(void)
65536 {
65537 register_sysrq_key('o', &sysrq_poweroff_op);
65538 return 0;
65539 diff --git a/kernel/power/process.c b/kernel/power/process.c
65540 index 3d4b954..11af930 100644
65541 --- a/kernel/power/process.c
65542 +++ b/kernel/power/process.c
65543 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
65544 u64 elapsed_csecs64;
65545 unsigned int elapsed_csecs;
65546 bool wakeup = false;
65547 + bool timedout = false;
65548
65549 do_gettimeofday(&start);
65550
65551 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
65552
65553 while (true) {
65554 todo = 0;
65555 + if (time_after(jiffies, end_time))
65556 + timedout = true;
65557 read_lock(&tasklist_lock);
65558 do_each_thread(g, p) {
65559 if (frozen(p) || !freezable(p))
65560 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
65561 * try_to_stop() after schedule() in ptrace/signal
65562 * stop sees TIF_FREEZE.
65563 */
65564 - if (!task_is_stopped_or_traced(p) &&
65565 - !freezer_should_skip(p))
65566 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65567 todo++;
65568 + if (timedout) {
65569 + printk(KERN_ERR "Task refusing to freeze:\n");
65570 + sched_show_task(p);
65571 + }
65572 + }
65573 } while_each_thread(g, p);
65574 read_unlock(&tasklist_lock);
65575
65576 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
65577 todo += wq_busy;
65578 }
65579
65580 - if (!todo || time_after(jiffies, end_time))
65581 + if (!todo || timedout)
65582 break;
65583
65584 if (pm_wakeup_pending()) {
65585 diff --git a/kernel/printk.c b/kernel/printk.c
65586 index 7982a0a..2095fdc 100644
65587 --- a/kernel/printk.c
65588 +++ b/kernel/printk.c
65589 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
65590 if (from_file && type != SYSLOG_ACTION_OPEN)
65591 return 0;
65592
65593 +#ifdef CONFIG_GRKERNSEC_DMESG
65594 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
65595 + return -EPERM;
65596 +#endif
65597 +
65598 if (syslog_action_restricted(type)) {
65599 if (capable(CAP_SYSLOG))
65600 return 0;
65601 diff --git a/kernel/profile.c b/kernel/profile.c
65602 index 76b8e77..a2930e8 100644
65603 --- a/kernel/profile.c
65604 +++ b/kernel/profile.c
65605 @@ -39,7 +39,7 @@ struct profile_hit {
65606 /* Oprofile timer tick hook */
65607 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65608
65609 -static atomic_t *prof_buffer;
65610 +static atomic_unchecked_t *prof_buffer;
65611 static unsigned long prof_len, prof_shift;
65612
65613 int prof_on __read_mostly;
65614 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
65615 hits[i].pc = 0;
65616 continue;
65617 }
65618 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65619 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65620 hits[i].hits = hits[i].pc = 0;
65621 }
65622 }
65623 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65624 * Add the current hit(s) and flush the write-queue out
65625 * to the global buffer:
65626 */
65627 - atomic_add(nr_hits, &prof_buffer[pc]);
65628 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65629 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65630 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65631 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65632 hits[i].pc = hits[i].hits = 0;
65633 }
65634 out:
65635 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65636 {
65637 unsigned long pc;
65638 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65639 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65640 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65641 }
65642 #endif /* !CONFIG_SMP */
65643
65644 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
65645 return -EFAULT;
65646 buf++; p++; count--; read++;
65647 }
65648 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65649 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65650 if (copy_to_user(buf, (void *)pnt, count))
65651 return -EFAULT;
65652 read += count;
65653 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
65654 }
65655 #endif
65656 profile_discard_flip_buffers();
65657 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65658 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65659 return count;
65660 }
65661
65662 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
65663 index 78ab24a..332c915 100644
65664 --- a/kernel/ptrace.c
65665 +++ b/kernel/ptrace.c
65666 @@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
65667 return ret;
65668 }
65669
65670 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65671 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65672 + unsigned int log)
65673 {
65674 const struct cred *cred = current_cred(), *tcred;
65675
65676 @@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65677 cred->gid == tcred->sgid &&
65678 cred->gid == tcred->gid))
65679 goto ok;
65680 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
65681 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
65682 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
65683 goto ok;
65684 rcu_read_unlock();
65685 return -EPERM;
65686 @@ -207,7 +209,9 @@ ok:
65687 smp_rmb();
65688 if (task->mm)
65689 dumpable = get_dumpable(task->mm);
65690 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
65691 + if (!dumpable &&
65692 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
65693 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
65694 return -EPERM;
65695
65696 return security_ptrace_access_check(task, mode);
65697 @@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
65698 {
65699 int err;
65700 task_lock(task);
65701 - err = __ptrace_may_access(task, mode);
65702 + err = __ptrace_may_access(task, mode, 0);
65703 + task_unlock(task);
65704 + return !err;
65705 +}
65706 +
65707 +bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
65708 +{
65709 + return __ptrace_may_access(task, mode, 0);
65710 +}
65711 +
65712 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65713 +{
65714 + int err;
65715 + task_lock(task);
65716 + err = __ptrace_may_access(task, mode, 1);
65717 task_unlock(task);
65718 return !err;
65719 }
65720 @@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65721 goto out;
65722
65723 task_lock(task);
65724 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65725 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65726 task_unlock(task);
65727 if (retval)
65728 goto unlock_creds;
65729 @@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65730 task->ptrace = PT_PTRACED;
65731 if (seize)
65732 task->ptrace |= PT_SEIZED;
65733 - if (task_ns_capable(task, CAP_SYS_PTRACE))
65734 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65735 task->ptrace |= PT_PTRACE_CAP;
65736
65737 __ptrace_link(task, current);
65738 @@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
65739 break;
65740 return -EIO;
65741 }
65742 - if (copy_to_user(dst, buf, retval))
65743 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65744 return -EFAULT;
65745 copied += retval;
65746 src += retval;
65747 @@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request,
65748 bool seized = child->ptrace & PT_SEIZED;
65749 int ret = -EIO;
65750 siginfo_t siginfo, *si;
65751 - void __user *datavp = (void __user *) data;
65752 + void __user *datavp = (__force void __user *) data;
65753 unsigned long __user *datalp = datavp;
65754 unsigned long flags;
65755
65756 @@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
65757 goto out;
65758 }
65759
65760 + if (gr_handle_ptrace(child, request)) {
65761 + ret = -EPERM;
65762 + goto out_put_task_struct;
65763 + }
65764 +
65765 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65766 ret = ptrace_attach(child, request, data);
65767 /*
65768 * Some architectures need to do book-keeping after
65769 * a ptrace attach.
65770 */
65771 - if (!ret)
65772 + if (!ret) {
65773 arch_ptrace_attach(child);
65774 + gr_audit_ptrace(child);
65775 + }
65776 goto out_put_task_struct;
65777 }
65778
65779 @@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
65780 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65781 if (copied != sizeof(tmp))
65782 return -EIO;
65783 - return put_user(tmp, (unsigned long __user *)data);
65784 + return put_user(tmp, (__force unsigned long __user *)data);
65785 }
65786
65787 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65788 @@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
65789 goto out;
65790 }
65791
65792 + if (gr_handle_ptrace(child, request)) {
65793 + ret = -EPERM;
65794 + goto out_put_task_struct;
65795 + }
65796 +
65797 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65798 ret = ptrace_attach(child, request, data);
65799 /*
65800 * Some architectures need to do book-keeping after
65801 * a ptrace attach.
65802 */
65803 - if (!ret)
65804 + if (!ret) {
65805 arch_ptrace_attach(child);
65806 + gr_audit_ptrace(child);
65807 + }
65808 goto out_put_task_struct;
65809 }
65810
65811 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
65812 index 764825c..3aa6ac4 100644
65813 --- a/kernel/rcutorture.c
65814 +++ b/kernel/rcutorture.c
65815 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
65816 { 0 };
65817 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65818 { 0 };
65819 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65820 -static atomic_t n_rcu_torture_alloc;
65821 -static atomic_t n_rcu_torture_alloc_fail;
65822 -static atomic_t n_rcu_torture_free;
65823 -static atomic_t n_rcu_torture_mberror;
65824 -static atomic_t n_rcu_torture_error;
65825 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65826 +static atomic_unchecked_t n_rcu_torture_alloc;
65827 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
65828 +static atomic_unchecked_t n_rcu_torture_free;
65829 +static atomic_unchecked_t n_rcu_torture_mberror;
65830 +static atomic_unchecked_t n_rcu_torture_error;
65831 static long n_rcu_torture_boost_ktrerror;
65832 static long n_rcu_torture_boost_rterror;
65833 static long n_rcu_torture_boost_failure;
65834 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65835
65836 spin_lock_bh(&rcu_torture_lock);
65837 if (list_empty(&rcu_torture_freelist)) {
65838 - atomic_inc(&n_rcu_torture_alloc_fail);
65839 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65840 spin_unlock_bh(&rcu_torture_lock);
65841 return NULL;
65842 }
65843 - atomic_inc(&n_rcu_torture_alloc);
65844 + atomic_inc_unchecked(&n_rcu_torture_alloc);
65845 p = rcu_torture_freelist.next;
65846 list_del_init(p);
65847 spin_unlock_bh(&rcu_torture_lock);
65848 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65849 static void
65850 rcu_torture_free(struct rcu_torture *p)
65851 {
65852 - atomic_inc(&n_rcu_torture_free);
65853 + atomic_inc_unchecked(&n_rcu_torture_free);
65854 spin_lock_bh(&rcu_torture_lock);
65855 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65856 spin_unlock_bh(&rcu_torture_lock);
65857 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65858 i = rp->rtort_pipe_count;
65859 if (i > RCU_TORTURE_PIPE_LEN)
65860 i = RCU_TORTURE_PIPE_LEN;
65861 - atomic_inc(&rcu_torture_wcount[i]);
65862 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65863 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65864 rp->rtort_mbtest = 0;
65865 rcu_torture_free(rp);
65866 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
65867 i = rp->rtort_pipe_count;
65868 if (i > RCU_TORTURE_PIPE_LEN)
65869 i = RCU_TORTURE_PIPE_LEN;
65870 - atomic_inc(&rcu_torture_wcount[i]);
65871 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65872 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65873 rp->rtort_mbtest = 0;
65874 list_del(&rp->rtort_free);
65875 @@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
65876 i = old_rp->rtort_pipe_count;
65877 if (i > RCU_TORTURE_PIPE_LEN)
65878 i = RCU_TORTURE_PIPE_LEN;
65879 - atomic_inc(&rcu_torture_wcount[i]);
65880 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65881 old_rp->rtort_pipe_count++;
65882 cur_ops->deferred_free(old_rp);
65883 }
65884 @@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
65885 return;
65886 }
65887 if (p->rtort_mbtest == 0)
65888 - atomic_inc(&n_rcu_torture_mberror);
65889 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65890 spin_lock(&rand_lock);
65891 cur_ops->read_delay(&rand);
65892 n_rcu_torture_timers++;
65893 @@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
65894 continue;
65895 }
65896 if (p->rtort_mbtest == 0)
65897 - atomic_inc(&n_rcu_torture_mberror);
65898 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65899 cur_ops->read_delay(&rand);
65900 preempt_disable();
65901 pipe_count = p->rtort_pipe_count;
65902 @@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
65903 rcu_torture_current,
65904 rcu_torture_current_version,
65905 list_empty(&rcu_torture_freelist),
65906 - atomic_read(&n_rcu_torture_alloc),
65907 - atomic_read(&n_rcu_torture_alloc_fail),
65908 - atomic_read(&n_rcu_torture_free),
65909 - atomic_read(&n_rcu_torture_mberror),
65910 + atomic_read_unchecked(&n_rcu_torture_alloc),
65911 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65912 + atomic_read_unchecked(&n_rcu_torture_free),
65913 + atomic_read_unchecked(&n_rcu_torture_mberror),
65914 n_rcu_torture_boost_ktrerror,
65915 n_rcu_torture_boost_rterror,
65916 n_rcu_torture_boost_failure,
65917 n_rcu_torture_boosts,
65918 n_rcu_torture_timers);
65919 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65920 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65921 n_rcu_torture_boost_ktrerror != 0 ||
65922 n_rcu_torture_boost_rterror != 0 ||
65923 n_rcu_torture_boost_failure != 0)
65924 @@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
65925 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65926 if (i > 1) {
65927 cnt += sprintf(&page[cnt], "!!! ");
65928 - atomic_inc(&n_rcu_torture_error);
65929 + atomic_inc_unchecked(&n_rcu_torture_error);
65930 WARN_ON_ONCE(1);
65931 }
65932 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65933 @@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
65934 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65935 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65936 cnt += sprintf(&page[cnt], " %d",
65937 - atomic_read(&rcu_torture_wcount[i]));
65938 + atomic_read_unchecked(&rcu_torture_wcount[i]));
65939 }
65940 cnt += sprintf(&page[cnt], "\n");
65941 if (cur_ops->stats)
65942 @@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
65943
65944 if (cur_ops->cleanup)
65945 cur_ops->cleanup();
65946 - if (atomic_read(&n_rcu_torture_error))
65947 + if (atomic_read_unchecked(&n_rcu_torture_error))
65948 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65949 else
65950 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65951 @@ -1465,17 +1465,17 @@ rcu_torture_init(void)
65952
65953 rcu_torture_current = NULL;
65954 rcu_torture_current_version = 0;
65955 - atomic_set(&n_rcu_torture_alloc, 0);
65956 - atomic_set(&n_rcu_torture_alloc_fail, 0);
65957 - atomic_set(&n_rcu_torture_free, 0);
65958 - atomic_set(&n_rcu_torture_mberror, 0);
65959 - atomic_set(&n_rcu_torture_error, 0);
65960 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65961 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65962 + atomic_set_unchecked(&n_rcu_torture_free, 0);
65963 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65964 + atomic_set_unchecked(&n_rcu_torture_error, 0);
65965 n_rcu_torture_boost_ktrerror = 0;
65966 n_rcu_torture_boost_rterror = 0;
65967 n_rcu_torture_boost_failure = 0;
65968 n_rcu_torture_boosts = 0;
65969 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65970 - atomic_set(&rcu_torture_wcount[i], 0);
65971 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65972 for_each_possible_cpu(cpu) {
65973 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65974 per_cpu(rcu_torture_count, cpu)[i] = 0;
65975 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
65976 index 6b76d81..7afc1b3 100644
65977 --- a/kernel/rcutree.c
65978 +++ b/kernel/rcutree.c
65979 @@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
65980 trace_rcu_dyntick("Start");
65981 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65982 smp_mb__before_atomic_inc(); /* See above. */
65983 - atomic_inc(&rdtp->dynticks);
65984 + atomic_inc_unchecked(&rdtp->dynticks);
65985 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65986 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65987 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65988 local_irq_restore(flags);
65989 }
65990
65991 @@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
65992 return;
65993 }
65994 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65995 - atomic_inc(&rdtp->dynticks);
65996 + atomic_inc_unchecked(&rdtp->dynticks);
65997 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65998 smp_mb__after_atomic_inc(); /* See above. */
65999 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
66000 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
66001 trace_rcu_dyntick("End");
66002 local_irq_restore(flags);
66003 }
66004 @@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
66005 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
66006
66007 if (rdtp->dynticks_nmi_nesting == 0 &&
66008 - (atomic_read(&rdtp->dynticks) & 0x1))
66009 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
66010 return;
66011 rdtp->dynticks_nmi_nesting++;
66012 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
66013 - atomic_inc(&rdtp->dynticks);
66014 + atomic_inc_unchecked(&rdtp->dynticks);
66015 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
66016 smp_mb__after_atomic_inc(); /* See above. */
66017 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
66018 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
66019 }
66020
66021 /**
66022 @@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
66023 return;
66024 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
66025 smp_mb__before_atomic_inc(); /* See above. */
66026 - atomic_inc(&rdtp->dynticks);
66027 + atomic_inc_unchecked(&rdtp->dynticks);
66028 smp_mb__after_atomic_inc(); /* Force delay to next write. */
66029 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
66030 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
66031 }
66032
66033 /**
66034 @@ -474,7 +474,7 @@ void rcu_irq_exit(void)
66035 */
66036 static int dyntick_save_progress_counter(struct rcu_data *rdp)
66037 {
66038 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
66039 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
66040 return 0;
66041 }
66042
66043 @@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
66044 unsigned int curr;
66045 unsigned int snap;
66046
66047 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
66048 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
66049 snap = (unsigned int)rdp->dynticks_snap;
66050
66051 /*
66052 @@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
66053 /*
66054 * Do RCU core processing for the current CPU.
66055 */
66056 -static void rcu_process_callbacks(struct softirq_action *unused)
66057 +static void rcu_process_callbacks(void)
66058 {
66059 trace_rcu_utilization("Start RCU core");
66060 __rcu_process_callbacks(&rcu_sched_state,
66061 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
66062 index 849ce9e..74bc9de 100644
66063 --- a/kernel/rcutree.h
66064 +++ b/kernel/rcutree.h
66065 @@ -86,7 +86,7 @@
66066 struct rcu_dynticks {
66067 int dynticks_nesting; /* Track irq/process nesting level. */
66068 int dynticks_nmi_nesting; /* Track NMI nesting level. */
66069 - atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
66070 + atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
66071 };
66072
66073 /* RCU's kthread states for tracing. */
66074 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
66075 index 4b9b9f8..2326053 100644
66076 --- a/kernel/rcutree_plugin.h
66077 +++ b/kernel/rcutree_plugin.h
66078 @@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
66079
66080 /* Clean up and exit. */
66081 smp_mb(); /* ensure expedited GP seen before counter increment. */
66082 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
66083 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
66084 unlock_mb_ret:
66085 mutex_unlock(&sync_rcu_preempt_exp_mutex);
66086 mb_ret:
66087 @@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
66088
66089 #else /* #ifndef CONFIG_SMP */
66090
66091 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
66092 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
66093 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
66094 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
66095
66096 static int synchronize_sched_expedited_cpu_stop(void *data)
66097 {
66098 @@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
66099 int firstsnap, s, snap, trycount = 0;
66100
66101 /* Note that atomic_inc_return() implies full memory barrier. */
66102 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
66103 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
66104 get_online_cpus();
66105
66106 /*
66107 @@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
66108 }
66109
66110 /* Check to see if someone else did our work for us. */
66111 - s = atomic_read(&sync_sched_expedited_done);
66112 + s = atomic_read_unchecked(&sync_sched_expedited_done);
66113 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
66114 smp_mb(); /* ensure test happens before caller kfree */
66115 return;
66116 @@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
66117 * grace period works for us.
66118 */
66119 get_online_cpus();
66120 - snap = atomic_read(&sync_sched_expedited_started) - 1;
66121 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
66122 smp_mb(); /* ensure read is before try_stop_cpus(). */
66123 }
66124
66125 @@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
66126 * than we did beat us to the punch.
66127 */
66128 do {
66129 - s = atomic_read(&sync_sched_expedited_done);
66130 + s = atomic_read_unchecked(&sync_sched_expedited_done);
66131 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
66132 smp_mb(); /* ensure test happens before caller kfree */
66133 break;
66134 }
66135 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
66136 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
66137
66138 put_online_cpus();
66139 }
66140 @@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
66141 for_each_online_cpu(thatcpu) {
66142 if (thatcpu == cpu)
66143 continue;
66144 - snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
66145 + snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
66146 thatcpu).dynticks);
66147 smp_mb(); /* Order sampling of snap with end of grace period. */
66148 if ((snap & 0x1) != 0) {
66149 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
66150 index 9feffa4..54058df 100644
66151 --- a/kernel/rcutree_trace.c
66152 +++ b/kernel/rcutree_trace.c
66153 @@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
66154 rdp->qs_pending);
66155 #ifdef CONFIG_NO_HZ
66156 seq_printf(m, " dt=%d/%d/%d df=%lu",
66157 - atomic_read(&rdp->dynticks->dynticks),
66158 + atomic_read_unchecked(&rdp->dynticks->dynticks),
66159 rdp->dynticks->dynticks_nesting,
66160 rdp->dynticks->dynticks_nmi_nesting,
66161 rdp->dynticks_fqs);
66162 @@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
66163 rdp->qs_pending);
66164 #ifdef CONFIG_NO_HZ
66165 seq_printf(m, ",%d,%d,%d,%lu",
66166 - atomic_read(&rdp->dynticks->dynticks),
66167 + atomic_read_unchecked(&rdp->dynticks->dynticks),
66168 rdp->dynticks->dynticks_nesting,
66169 rdp->dynticks->dynticks_nmi_nesting,
66170 rdp->dynticks_fqs);
66171 diff --git a/kernel/resource.c b/kernel/resource.c
66172 index 7640b3a..5879283 100644
66173 --- a/kernel/resource.c
66174 +++ b/kernel/resource.c
66175 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
66176
66177 static int __init ioresources_init(void)
66178 {
66179 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66180 +#ifdef CONFIG_GRKERNSEC_PROC_USER
66181 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
66182 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
66183 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66184 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
66185 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
66186 +#endif
66187 +#else
66188 proc_create("ioports", 0, NULL, &proc_ioports_operations);
66189 proc_create("iomem", 0, NULL, &proc_iomem_operations);
66190 +#endif
66191 return 0;
66192 }
66193 __initcall(ioresources_init);
66194 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
66195 index 3d9f31c..7fefc9e 100644
66196 --- a/kernel/rtmutex-tester.c
66197 +++ b/kernel/rtmutex-tester.c
66198 @@ -20,7 +20,7 @@
66199 #define MAX_RT_TEST_MUTEXES 8
66200
66201 static spinlock_t rttest_lock;
66202 -static atomic_t rttest_event;
66203 +static atomic_unchecked_t rttest_event;
66204
66205 struct test_thread_data {
66206 int opcode;
66207 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66208
66209 case RTTEST_LOCKCONT:
66210 td->mutexes[td->opdata] = 1;
66211 - td->event = atomic_add_return(1, &rttest_event);
66212 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66213 return 0;
66214
66215 case RTTEST_RESET:
66216 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66217 return 0;
66218
66219 case RTTEST_RESETEVENT:
66220 - atomic_set(&rttest_event, 0);
66221 + atomic_set_unchecked(&rttest_event, 0);
66222 return 0;
66223
66224 default:
66225 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66226 return ret;
66227
66228 td->mutexes[id] = 1;
66229 - td->event = atomic_add_return(1, &rttest_event);
66230 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66231 rt_mutex_lock(&mutexes[id]);
66232 - td->event = atomic_add_return(1, &rttest_event);
66233 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66234 td->mutexes[id] = 4;
66235 return 0;
66236
66237 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66238 return ret;
66239
66240 td->mutexes[id] = 1;
66241 - td->event = atomic_add_return(1, &rttest_event);
66242 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66243 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
66244 - td->event = atomic_add_return(1, &rttest_event);
66245 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66246 td->mutexes[id] = ret ? 0 : 4;
66247 return ret ? -EINTR : 0;
66248
66249 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66250 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
66251 return ret;
66252
66253 - td->event = atomic_add_return(1, &rttest_event);
66254 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66255 rt_mutex_unlock(&mutexes[id]);
66256 - td->event = atomic_add_return(1, &rttest_event);
66257 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66258 td->mutexes[id] = 0;
66259 return 0;
66260
66261 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
66262 break;
66263
66264 td->mutexes[dat] = 2;
66265 - td->event = atomic_add_return(1, &rttest_event);
66266 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66267 break;
66268
66269 default:
66270 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
66271 return;
66272
66273 td->mutexes[dat] = 3;
66274 - td->event = atomic_add_return(1, &rttest_event);
66275 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66276 break;
66277
66278 case RTTEST_LOCKNOWAIT:
66279 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
66280 return;
66281
66282 td->mutexes[dat] = 1;
66283 - td->event = atomic_add_return(1, &rttest_event);
66284 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66285 return;
66286
66287 default:
66288 diff --git a/kernel/sched.c b/kernel/sched.c
66289 index d6b149c..896cbb8 100644
66290 --- a/kernel/sched.c
66291 +++ b/kernel/sched.c
66292 @@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
66293 BUG(); /* the idle class will always have a runnable task */
66294 }
66295
66296 +#ifdef CONFIG_GRKERNSEC_SETXID
66297 +extern void gr_delayed_cred_worker(void);
66298 +static inline void gr_cred_schedule(void)
66299 +{
66300 + if (unlikely(current->delayed_cred))
66301 + gr_delayed_cred_worker();
66302 +}
66303 +#else
66304 +static inline void gr_cred_schedule(void)
66305 +{
66306 +}
66307 +#endif
66308 +
66309 /*
66310 * __schedule() is the main scheduler function.
66311 */
66312 @@ -4408,6 +4421,8 @@ need_resched:
66313
66314 schedule_debug(prev);
66315
66316 + gr_cred_schedule();
66317 +
66318 if (sched_feat(HRTICK))
66319 hrtick_clear(rq);
66320
66321 @@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
66322 /* convert nice value [19,-20] to rlimit style value [1,40] */
66323 int nice_rlim = 20 - nice;
66324
66325 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
66326 +
66327 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
66328 capable(CAP_SYS_NICE));
66329 }
66330 @@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
66331 if (nice > 19)
66332 nice = 19;
66333
66334 - if (increment < 0 && !can_nice(current, nice))
66335 + if (increment < 0 && (!can_nice(current, nice) ||
66336 + gr_handle_chroot_nice()))
66337 return -EPERM;
66338
66339 retval = security_task_setnice(current, nice);
66340 @@ -5288,6 +5306,7 @@ recheck:
66341 unsigned long rlim_rtprio =
66342 task_rlimit(p, RLIMIT_RTPRIO);
66343
66344 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
66345 /* can't set/change the rt policy */
66346 if (policy != p->policy && !rlim_rtprio)
66347 return -EPERM;
66348 diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
66349 index 429242f..d7cca82 100644
66350 --- a/kernel/sched_autogroup.c
66351 +++ b/kernel/sched_autogroup.c
66352 @@ -7,7 +7,7 @@
66353
66354 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
66355 static struct autogroup autogroup_default;
66356 -static atomic_t autogroup_seq_nr;
66357 +static atomic_unchecked_t autogroup_seq_nr;
66358
66359 static void __init autogroup_init(struct task_struct *init_task)
66360 {
66361 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
66362
66363 kref_init(&ag->kref);
66364 init_rwsem(&ag->lock);
66365 - ag->id = atomic_inc_return(&autogroup_seq_nr);
66366 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
66367 ag->tg = tg;
66368 #ifdef CONFIG_RT_GROUP_SCHED
66369 /*
66370 diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
66371 index 8a39fa3..34f3dbc 100644
66372 --- a/kernel/sched_fair.c
66373 +++ b/kernel/sched_fair.c
66374 @@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
66375 * run_rebalance_domains is triggered when needed from the scheduler tick.
66376 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
66377 */
66378 -static void run_rebalance_domains(struct softirq_action *h)
66379 +static void run_rebalance_domains(void)
66380 {
66381 int this_cpu = smp_processor_id();
66382 struct rq *this_rq = cpu_rq(this_cpu);
66383 diff --git a/kernel/signal.c b/kernel/signal.c
66384 index 2065515..aed2987 100644
66385 --- a/kernel/signal.c
66386 +++ b/kernel/signal.c
66387 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
66388
66389 int print_fatal_signals __read_mostly;
66390
66391 -static void __user *sig_handler(struct task_struct *t, int sig)
66392 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
66393 {
66394 return t->sighand->action[sig - 1].sa.sa_handler;
66395 }
66396
66397 -static int sig_handler_ignored(void __user *handler, int sig)
66398 +static int sig_handler_ignored(__sighandler_t handler, int sig)
66399 {
66400 /* Is it explicitly or implicitly ignored? */
66401 return handler == SIG_IGN ||
66402 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
66403 static int sig_task_ignored(struct task_struct *t, int sig,
66404 int from_ancestor_ns)
66405 {
66406 - void __user *handler;
66407 + __sighandler_t handler;
66408
66409 handler = sig_handler(t, sig);
66410
66411 @@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
66412 atomic_inc(&user->sigpending);
66413 rcu_read_unlock();
66414
66415 + if (!override_rlimit)
66416 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
66417 +
66418 if (override_rlimit ||
66419 atomic_read(&user->sigpending) <=
66420 task_rlimit(t, RLIMIT_SIGPENDING)) {
66421 @@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
66422
66423 int unhandled_signal(struct task_struct *tsk, int sig)
66424 {
66425 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
66426 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
66427 if (is_global_init(tsk))
66428 return 1;
66429 if (handler != SIG_IGN && handler != SIG_DFL)
66430 @@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
66431 }
66432 }
66433
66434 + /* allow glibc communication via tgkill to other threads in our
66435 + thread group */
66436 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
66437 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
66438 + && gr_handle_signal(t, sig))
66439 + return -EPERM;
66440 +
66441 return security_task_kill(t, info, sig, 0);
66442 }
66443
66444 @@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66445 return send_signal(sig, info, p, 1);
66446 }
66447
66448 -static int
66449 +int
66450 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66451 {
66452 return send_signal(sig, info, t, 0);
66453 @@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66454 unsigned long int flags;
66455 int ret, blocked, ignored;
66456 struct k_sigaction *action;
66457 + int is_unhandled = 0;
66458
66459 spin_lock_irqsave(&t->sighand->siglock, flags);
66460 action = &t->sighand->action[sig-1];
66461 @@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66462 }
66463 if (action->sa.sa_handler == SIG_DFL)
66464 t->signal->flags &= ~SIGNAL_UNKILLABLE;
66465 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
66466 + is_unhandled = 1;
66467 ret = specific_send_sig_info(sig, info, t);
66468 spin_unlock_irqrestore(&t->sighand->siglock, flags);
66469
66470 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
66471 + normal operation */
66472 + if (is_unhandled) {
66473 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
66474 + gr_handle_crash(t, sig);
66475 + }
66476 +
66477 return ret;
66478 }
66479
66480 @@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66481 ret = check_kill_permission(sig, info, p);
66482 rcu_read_unlock();
66483
66484 - if (!ret && sig)
66485 + if (!ret && sig) {
66486 ret = do_send_sig_info(sig, info, p, true);
66487 + if (!ret)
66488 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
66489 + }
66490
66491 return ret;
66492 }
66493 @@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
66494 int error = -ESRCH;
66495
66496 rcu_read_lock();
66497 - p = find_task_by_vpid(pid);
66498 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66499 + /* allow glibc communication via tgkill to other threads in our
66500 + thread group */
66501 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
66502 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
66503 + p = find_task_by_vpid_unrestricted(pid);
66504 + else
66505 +#endif
66506 + p = find_task_by_vpid(pid);
66507 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
66508 error = check_kill_permission(sig, info, p);
66509 /*
66510 diff --git a/kernel/smp.c b/kernel/smp.c
66511 index db197d6..17aef0b 100644
66512 --- a/kernel/smp.c
66513 +++ b/kernel/smp.c
66514 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
66515 }
66516 EXPORT_SYMBOL(smp_call_function);
66517
66518 -void ipi_call_lock(void)
66519 +void ipi_call_lock(void) __acquires(call_function.lock)
66520 {
66521 raw_spin_lock(&call_function.lock);
66522 }
66523
66524 -void ipi_call_unlock(void)
66525 +void ipi_call_unlock(void) __releases(call_function.lock)
66526 {
66527 raw_spin_unlock(&call_function.lock);
66528 }
66529
66530 -void ipi_call_lock_irq(void)
66531 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
66532 {
66533 raw_spin_lock_irq(&call_function.lock);
66534 }
66535
66536 -void ipi_call_unlock_irq(void)
66537 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
66538 {
66539 raw_spin_unlock_irq(&call_function.lock);
66540 }
66541 diff --git a/kernel/softirq.c b/kernel/softirq.c
66542 index 2c71d91..1021f81 100644
66543 --- a/kernel/softirq.c
66544 +++ b/kernel/softirq.c
66545 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
66546
66547 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
66548
66549 -char *softirq_to_name[NR_SOFTIRQS] = {
66550 +const char * const softirq_to_name[NR_SOFTIRQS] = {
66551 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
66552 "TASKLET", "SCHED", "HRTIMER", "RCU"
66553 };
66554 @@ -235,7 +235,7 @@ restart:
66555 kstat_incr_softirqs_this_cpu(vec_nr);
66556
66557 trace_softirq_entry(vec_nr);
66558 - h->action(h);
66559 + h->action();
66560 trace_softirq_exit(vec_nr);
66561 if (unlikely(prev_count != preempt_count())) {
66562 printk(KERN_ERR "huh, entered softirq %u %s %p"
66563 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
66564 local_irq_restore(flags);
66565 }
66566
66567 -void open_softirq(int nr, void (*action)(struct softirq_action *))
66568 +void open_softirq(int nr, void (*action)(void))
66569 {
66570 - softirq_vec[nr].action = action;
66571 + pax_open_kernel();
66572 + *(void **)&softirq_vec[nr].action = action;
66573 + pax_close_kernel();
66574 }
66575
66576 /*
66577 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
66578
66579 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
66580
66581 -static void tasklet_action(struct softirq_action *a)
66582 +static void tasklet_action(void)
66583 {
66584 struct tasklet_struct *list;
66585
66586 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
66587 }
66588 }
66589
66590 -static void tasklet_hi_action(struct softirq_action *a)
66591 +static void tasklet_hi_action(void)
66592 {
66593 struct tasklet_struct *list;
66594
66595 diff --git a/kernel/sys.c b/kernel/sys.c
66596 index 481611f..0754d86 100644
66597 --- a/kernel/sys.c
66598 +++ b/kernel/sys.c
66599 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
66600 error = -EACCES;
66601 goto out;
66602 }
66603 +
66604 + if (gr_handle_chroot_setpriority(p, niceval)) {
66605 + error = -EACCES;
66606 + goto out;
66607 + }
66608 +
66609 no_nice = security_task_setnice(p, niceval);
66610 if (no_nice) {
66611 error = no_nice;
66612 @@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
66613 goto error;
66614 }
66615
66616 + if (gr_check_group_change(new->gid, new->egid, -1))
66617 + goto error;
66618 +
66619 if (rgid != (gid_t) -1 ||
66620 (egid != (gid_t) -1 && egid != old->gid))
66621 new->sgid = new->egid;
66622 @@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66623 old = current_cred();
66624
66625 retval = -EPERM;
66626 +
66627 + if (gr_check_group_change(gid, gid, gid))
66628 + goto error;
66629 +
66630 if (nsown_capable(CAP_SETGID))
66631 new->gid = new->egid = new->sgid = new->fsgid = gid;
66632 else if (gid == old->gid || gid == old->sgid)
66633 @@ -618,7 +631,7 @@ error:
66634 /*
66635 * change the user struct in a credentials set to match the new UID
66636 */
66637 -static int set_user(struct cred *new)
66638 +int set_user(struct cred *new)
66639 {
66640 struct user_struct *new_user;
66641
66642 @@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
66643 goto error;
66644 }
66645
66646 + if (gr_check_user_change(new->uid, new->euid, -1))
66647 + goto error;
66648 +
66649 if (new->uid != old->uid) {
66650 retval = set_user(new);
66651 if (retval < 0)
66652 @@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66653 old = current_cred();
66654
66655 retval = -EPERM;
66656 +
66657 + if (gr_check_crash_uid(uid))
66658 + goto error;
66659 + if (gr_check_user_change(uid, uid, uid))
66660 + goto error;
66661 +
66662 if (nsown_capable(CAP_SETUID)) {
66663 new->suid = new->uid = uid;
66664 if (uid != old->uid) {
66665 @@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
66666 goto error;
66667 }
66668
66669 + if (gr_check_user_change(ruid, euid, -1))
66670 + goto error;
66671 +
66672 if (ruid != (uid_t) -1) {
66673 new->uid = ruid;
66674 if (ruid != old->uid) {
66675 @@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
66676 goto error;
66677 }
66678
66679 + if (gr_check_group_change(rgid, egid, -1))
66680 + goto error;
66681 +
66682 if (rgid != (gid_t) -1)
66683 new->gid = rgid;
66684 if (egid != (gid_t) -1)
66685 @@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66686 old = current_cred();
66687 old_fsuid = old->fsuid;
66688
66689 + if (gr_check_user_change(-1, -1, uid))
66690 + goto error;
66691 +
66692 if (uid == old->uid || uid == old->euid ||
66693 uid == old->suid || uid == old->fsuid ||
66694 nsown_capable(CAP_SETUID)) {
66695 @@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66696 }
66697 }
66698
66699 +error:
66700 abort_creds(new);
66701 return old_fsuid;
66702
66703 @@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66704 if (gid == old->gid || gid == old->egid ||
66705 gid == old->sgid || gid == old->fsgid ||
66706 nsown_capable(CAP_SETGID)) {
66707 + if (gr_check_group_change(-1, -1, gid))
66708 + goto error;
66709 +
66710 if (gid != old_fsgid) {
66711 new->fsgid = gid;
66712 goto change_okay;
66713 }
66714 }
66715
66716 +error:
66717 abort_creds(new);
66718 return old_fsgid;
66719
66720 @@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
66721 }
66722 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
66723 snprintf(buf, len, "2.6.%u%s", v, rest);
66724 - ret = copy_to_user(release, buf, len);
66725 + if (len > sizeof(buf))
66726 + ret = -EFAULT;
66727 + else
66728 + ret = copy_to_user(release, buf, len);
66729 }
66730 return ret;
66731 }
66732 @@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
66733 return -EFAULT;
66734
66735 down_read(&uts_sem);
66736 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
66737 + error = __copy_to_user(name->sysname, &utsname()->sysname,
66738 __OLD_UTS_LEN);
66739 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
66740 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
66741 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
66742 __OLD_UTS_LEN);
66743 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
66744 - error |= __copy_to_user(&name->release, &utsname()->release,
66745 + error |= __copy_to_user(name->release, &utsname()->release,
66746 __OLD_UTS_LEN);
66747 error |= __put_user(0, name->release + __OLD_UTS_LEN);
66748 - error |= __copy_to_user(&name->version, &utsname()->version,
66749 + error |= __copy_to_user(name->version, &utsname()->version,
66750 __OLD_UTS_LEN);
66751 error |= __put_user(0, name->version + __OLD_UTS_LEN);
66752 - error |= __copy_to_user(&name->machine, &utsname()->machine,
66753 + error |= __copy_to_user(name->machine, &utsname()->machine,
66754 __OLD_UTS_LEN);
66755 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66756 up_read(&uts_sem);
66757 @@ -1720,7 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
66758 error = get_dumpable(me->mm);
66759 break;
66760 case PR_SET_DUMPABLE:
66761 - if (arg2 < 0 || arg2 > 1) {
66762 + if (arg2 > 1) {
66763 error = -EINVAL;
66764 break;
66765 }
66766 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
66767 index ae27196..7506d69 100644
66768 --- a/kernel/sysctl.c
66769 +++ b/kernel/sysctl.c
66770 @@ -86,6 +86,13 @@
66771
66772
66773 #if defined(CONFIG_SYSCTL)
66774 +#include <linux/grsecurity.h>
66775 +#include <linux/grinternal.h>
66776 +
66777 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66778 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66779 + const int op);
66780 +extern int gr_handle_chroot_sysctl(const int op);
66781
66782 /* External variables not in a header file. */
66783 extern int sysctl_overcommit_memory;
66784 @@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
66785 }
66786
66787 #endif
66788 +extern struct ctl_table grsecurity_table[];
66789
66790 static struct ctl_table root_table[];
66791 static struct ctl_table_root sysctl_table_root;
66792 @@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
66793 int sysctl_legacy_va_layout;
66794 #endif
66795
66796 +#ifdef CONFIG_PAX_SOFTMODE
66797 +static ctl_table pax_table[] = {
66798 + {
66799 + .procname = "softmode",
66800 + .data = &pax_softmode,
66801 + .maxlen = sizeof(unsigned int),
66802 + .mode = 0600,
66803 + .proc_handler = &proc_dointvec,
66804 + },
66805 +
66806 + { }
66807 +};
66808 +#endif
66809 +
66810 /* The default sysctl tables: */
66811
66812 static struct ctl_table root_table[] = {
66813 @@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
66814 #endif
66815
66816 static struct ctl_table kern_table[] = {
66817 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66818 + {
66819 + .procname = "grsecurity",
66820 + .mode = 0500,
66821 + .child = grsecurity_table,
66822 + },
66823 +#endif
66824 +
66825 +#ifdef CONFIG_PAX_SOFTMODE
66826 + {
66827 + .procname = "pax",
66828 + .mode = 0500,
66829 + .child = pax_table,
66830 + },
66831 +#endif
66832 +
66833 {
66834 .procname = "sched_child_runs_first",
66835 .data = &sysctl_sched_child_runs_first,
66836 @@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
66837 .data = &modprobe_path,
66838 .maxlen = KMOD_PATH_LEN,
66839 .mode = 0644,
66840 - .proc_handler = proc_dostring,
66841 + .proc_handler = proc_dostring_modpriv,
66842 },
66843 {
66844 .procname = "modules_disabled",
66845 @@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
66846 .extra1 = &zero,
66847 .extra2 = &one,
66848 },
66849 +#endif
66850 {
66851 .procname = "kptr_restrict",
66852 .data = &kptr_restrict,
66853 .maxlen = sizeof(int),
66854 .mode = 0644,
66855 .proc_handler = proc_dmesg_restrict,
66856 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66857 + .extra1 = &two,
66858 +#else
66859 .extra1 = &zero,
66860 +#endif
66861 .extra2 = &two,
66862 },
66863 -#endif
66864 {
66865 .procname = "ngroups_max",
66866 .data = &ngroups_max,
66867 @@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = {
66868 .proc_handler = proc_dointvec_minmax,
66869 .extra1 = &zero,
66870 },
66871 + {
66872 + .procname = "heap_stack_gap",
66873 + .data = &sysctl_heap_stack_gap,
66874 + .maxlen = sizeof(sysctl_heap_stack_gap),
66875 + .mode = 0644,
66876 + .proc_handler = proc_doulongvec_minmax,
66877 + },
66878 #else
66879 {
66880 .procname = "nr_trim_pages",
66881 @@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op)
66882 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66883 {
66884 int mode;
66885 + int error;
66886 +
66887 + if (table->parent != NULL && table->parent->procname != NULL &&
66888 + table->procname != NULL &&
66889 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66890 + return -EACCES;
66891 + if (gr_handle_chroot_sysctl(op))
66892 + return -EACCES;
66893 + error = gr_handle_sysctl(table, op);
66894 + if (error)
66895 + return error;
66896
66897 if (root->permissions)
66898 mode = root->permissions(root, current->nsproxy, table);
66899 @@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write,
66900 buffer, lenp, ppos);
66901 }
66902
66903 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66904 + void __user *buffer, size_t *lenp, loff_t *ppos)
66905 +{
66906 + if (write && !capable(CAP_SYS_MODULE))
66907 + return -EPERM;
66908 +
66909 + return _proc_do_string(table->data, table->maxlen, write,
66910 + buffer, lenp, ppos);
66911 +}
66912 +
66913 static size_t proc_skip_spaces(char **buf)
66914 {
66915 size_t ret;
66916 @@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
66917 len = strlen(tmp);
66918 if (len > *size)
66919 len = *size;
66920 + if (len > sizeof(tmp))
66921 + len = sizeof(tmp);
66922 if (copy_to_user(*buf, tmp, len))
66923 return -EFAULT;
66924 *size -= len;
66925 @@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
66926 *i = val;
66927 } else {
66928 val = convdiv * (*i) / convmul;
66929 - if (!first)
66930 + if (!first) {
66931 err = proc_put_char(&buffer, &left, '\t');
66932 + if (err)
66933 + break;
66934 + }
66935 err = proc_put_long(&buffer, &left, val, false);
66936 if (err)
66937 break;
66938 @@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write,
66939 return -ENOSYS;
66940 }
66941
66942 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66943 + void __user *buffer, size_t *lenp, loff_t *ppos)
66944 +{
66945 + return -ENOSYS;
66946 +}
66947 +
66948 int proc_dointvec(struct ctl_table *table, int write,
66949 void __user *buffer, size_t *lenp, loff_t *ppos)
66950 {
66951 @@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66952 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66953 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66954 EXPORT_SYMBOL(proc_dostring);
66955 +EXPORT_SYMBOL(proc_dostring_modpriv);
66956 EXPORT_SYMBOL(proc_doulongvec_minmax);
66957 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66958 EXPORT_SYMBOL(register_sysctl_table);
66959 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
66960 index a650694..aaeeb20 100644
66961 --- a/kernel/sysctl_binary.c
66962 +++ b/kernel/sysctl_binary.c
66963 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
66964 int i;
66965
66966 set_fs(KERNEL_DS);
66967 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66968 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66969 set_fs(old_fs);
66970 if (result < 0)
66971 goto out_kfree;
66972 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
66973 }
66974
66975 set_fs(KERNEL_DS);
66976 - result = vfs_write(file, buffer, str - buffer, &pos);
66977 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66978 set_fs(old_fs);
66979 if (result < 0)
66980 goto out_kfree;
66981 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
66982 int i;
66983
66984 set_fs(KERNEL_DS);
66985 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66986 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66987 set_fs(old_fs);
66988 if (result < 0)
66989 goto out_kfree;
66990 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
66991 }
66992
66993 set_fs(KERNEL_DS);
66994 - result = vfs_write(file, buffer, str - buffer, &pos);
66995 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66996 set_fs(old_fs);
66997 if (result < 0)
66998 goto out_kfree;
66999 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
67000 int i;
67001
67002 set_fs(KERNEL_DS);
67003 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
67004 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
67005 set_fs(old_fs);
67006 if (result < 0)
67007 goto out;
67008 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
67009 __le16 dnaddr;
67010
67011 set_fs(KERNEL_DS);
67012 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
67013 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
67014 set_fs(old_fs);
67015 if (result < 0)
67016 goto out;
67017 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
67018 le16_to_cpu(dnaddr) & 0x3ff);
67019
67020 set_fs(KERNEL_DS);
67021 - result = vfs_write(file, buf, len, &pos);
67022 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
67023 set_fs(old_fs);
67024 if (result < 0)
67025 goto out;
67026 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
67027 index 362da65..ab8ef8c 100644
67028 --- a/kernel/sysctl_check.c
67029 +++ b/kernel/sysctl_check.c
67030 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
67031 set_fail(&fail, table, "Directory with extra2");
67032 } else {
67033 if ((table->proc_handler == proc_dostring) ||
67034 + (table->proc_handler == proc_dostring_modpriv) ||
67035 (table->proc_handler == proc_dointvec) ||
67036 (table->proc_handler == proc_dointvec_minmax) ||
67037 (table->proc_handler == proc_dointvec_jiffies) ||
67038 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
67039 index e660464..c8b9e67 100644
67040 --- a/kernel/taskstats.c
67041 +++ b/kernel/taskstats.c
67042 @@ -27,9 +27,12 @@
67043 #include <linux/cgroup.h>
67044 #include <linux/fs.h>
67045 #include <linux/file.h>
67046 +#include <linux/grsecurity.h>
67047 #include <net/genetlink.h>
67048 #include <linux/atomic.h>
67049
67050 +extern int gr_is_taskstats_denied(int pid);
67051 +
67052 /*
67053 * Maximum length of a cpumask that can be specified in
67054 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
67055 @@ -556,6 +559,9 @@ err:
67056
67057 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
67058 {
67059 + if (gr_is_taskstats_denied(current->pid))
67060 + return -EACCES;
67061 +
67062 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
67063 return cmd_attr_register_cpumask(info);
67064 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
67065 diff --git a/kernel/time.c b/kernel/time.c
67066 index 73e416d..cfc6f69 100644
67067 --- a/kernel/time.c
67068 +++ b/kernel/time.c
67069 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
67070 return error;
67071
67072 if (tz) {
67073 + /* we log in do_settimeofday called below, so don't log twice
67074 + */
67075 + if (!tv)
67076 + gr_log_timechange();
67077 +
67078 /* SMP safe, global irq locking makes it work. */
67079 sys_tz = *tz;
67080 update_vsyscall_tz();
67081 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
67082 index 8a46f5d..bbe6f9c 100644
67083 --- a/kernel/time/alarmtimer.c
67084 +++ b/kernel/time/alarmtimer.c
67085 @@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
67086 struct platform_device *pdev;
67087 int error = 0;
67088 int i;
67089 - struct k_clock alarm_clock = {
67090 + static struct k_clock alarm_clock = {
67091 .clock_getres = alarm_clock_getres,
67092 .clock_get = alarm_clock_get,
67093 .timer_create = alarm_timer_create,
67094 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
67095 index fd4a7b1..fae5c2a 100644
67096 --- a/kernel/time/tick-broadcast.c
67097 +++ b/kernel/time/tick-broadcast.c
67098 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
67099 * then clear the broadcast bit.
67100 */
67101 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
67102 - int cpu = smp_processor_id();
67103 + cpu = smp_processor_id();
67104
67105 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
67106 tick_broadcast_clear_oneshot(cpu);
67107 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
67108 index 2378413..be455fd 100644
67109 --- a/kernel/time/timekeeping.c
67110 +++ b/kernel/time/timekeeping.c
67111 @@ -14,6 +14,7 @@
67112 #include <linux/init.h>
67113 #include <linux/mm.h>
67114 #include <linux/sched.h>
67115 +#include <linux/grsecurity.h>
67116 #include <linux/syscore_ops.h>
67117 #include <linux/clocksource.h>
67118 #include <linux/jiffies.h>
67119 @@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
67120 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
67121 return -EINVAL;
67122
67123 + gr_log_timechange();
67124 +
67125 write_seqlock_irqsave(&xtime_lock, flags);
67126
67127 timekeeping_forward_now();
67128 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
67129 index 3258455..f35227d 100644
67130 --- a/kernel/time/timer_list.c
67131 +++ b/kernel/time/timer_list.c
67132 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
67133
67134 static void print_name_offset(struct seq_file *m, void *sym)
67135 {
67136 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67137 + SEQ_printf(m, "<%p>", NULL);
67138 +#else
67139 char symname[KSYM_NAME_LEN];
67140
67141 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
67142 SEQ_printf(m, "<%pK>", sym);
67143 else
67144 SEQ_printf(m, "%s", symname);
67145 +#endif
67146 }
67147
67148 static void
67149 @@ -112,7 +116,11 @@ next_one:
67150 static void
67151 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
67152 {
67153 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67154 + SEQ_printf(m, " .base: %p\n", NULL);
67155 +#else
67156 SEQ_printf(m, " .base: %pK\n", base);
67157 +#endif
67158 SEQ_printf(m, " .index: %d\n",
67159 base->index);
67160 SEQ_printf(m, " .resolution: %Lu nsecs\n",
67161 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
67162 {
67163 struct proc_dir_entry *pe;
67164
67165 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67166 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
67167 +#else
67168 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
67169 +#endif
67170 if (!pe)
67171 return -ENOMEM;
67172 return 0;
67173 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
67174 index 0b537f2..9e71eca 100644
67175 --- a/kernel/time/timer_stats.c
67176 +++ b/kernel/time/timer_stats.c
67177 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
67178 static unsigned long nr_entries;
67179 static struct entry entries[MAX_ENTRIES];
67180
67181 -static atomic_t overflow_count;
67182 +static atomic_unchecked_t overflow_count;
67183
67184 /*
67185 * The entries are in a hash-table, for fast lookup:
67186 @@ -140,7 +140,7 @@ static void reset_entries(void)
67187 nr_entries = 0;
67188 memset(entries, 0, sizeof(entries));
67189 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
67190 - atomic_set(&overflow_count, 0);
67191 + atomic_set_unchecked(&overflow_count, 0);
67192 }
67193
67194 static struct entry *alloc_entry(void)
67195 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
67196 if (likely(entry))
67197 entry->count++;
67198 else
67199 - atomic_inc(&overflow_count);
67200 + atomic_inc_unchecked(&overflow_count);
67201
67202 out_unlock:
67203 raw_spin_unlock_irqrestore(lock, flags);
67204 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
67205
67206 static void print_name_offset(struct seq_file *m, unsigned long addr)
67207 {
67208 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67209 + seq_printf(m, "<%p>", NULL);
67210 +#else
67211 char symname[KSYM_NAME_LEN];
67212
67213 if (lookup_symbol_name(addr, symname) < 0)
67214 seq_printf(m, "<%p>", (void *)addr);
67215 else
67216 seq_printf(m, "%s", symname);
67217 +#endif
67218 }
67219
67220 static int tstats_show(struct seq_file *m, void *v)
67221 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
67222
67223 seq_puts(m, "Timer Stats Version: v0.2\n");
67224 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
67225 - if (atomic_read(&overflow_count))
67226 + if (atomic_read_unchecked(&overflow_count))
67227 seq_printf(m, "Overflow: %d entries\n",
67228 - atomic_read(&overflow_count));
67229 + atomic_read_unchecked(&overflow_count));
67230
67231 for (i = 0; i < nr_entries; i++) {
67232 entry = entries + i;
67233 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
67234 {
67235 struct proc_dir_entry *pe;
67236
67237 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67238 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
67239 +#else
67240 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
67241 +#endif
67242 if (!pe)
67243 return -ENOMEM;
67244 return 0;
67245 diff --git a/kernel/timer.c b/kernel/timer.c
67246 index 9c3c62b..441690e 100644
67247 --- a/kernel/timer.c
67248 +++ b/kernel/timer.c
67249 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
67250 /*
67251 * This function runs timers and the timer-tq in bottom half context.
67252 */
67253 -static void run_timer_softirq(struct softirq_action *h)
67254 +static void run_timer_softirq(void)
67255 {
67256 struct tvec_base *base = __this_cpu_read(tvec_bases);
67257
67258 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
67259 index 16fc34a..efd8bb8 100644
67260 --- a/kernel/trace/blktrace.c
67261 +++ b/kernel/trace/blktrace.c
67262 @@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
67263 struct blk_trace *bt = filp->private_data;
67264 char buf[16];
67265
67266 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
67267 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
67268
67269 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
67270 }
67271 @@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
67272 return 1;
67273
67274 bt = buf->chan->private_data;
67275 - atomic_inc(&bt->dropped);
67276 + atomic_inc_unchecked(&bt->dropped);
67277 return 0;
67278 }
67279
67280 @@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
67281
67282 bt->dir = dir;
67283 bt->dev = dev;
67284 - atomic_set(&bt->dropped, 0);
67285 + atomic_set_unchecked(&bt->dropped, 0);
67286
67287 ret = -EIO;
67288 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
67289 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
67290 index 25b4f4d..6f4772d 100644
67291 --- a/kernel/trace/ftrace.c
67292 +++ b/kernel/trace/ftrace.c
67293 @@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
67294 if (unlikely(ftrace_disabled))
67295 return 0;
67296
67297 + ret = ftrace_arch_code_modify_prepare();
67298 + FTRACE_WARN_ON(ret);
67299 + if (ret)
67300 + return 0;
67301 +
67302 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
67303 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
67304 if (ret) {
67305 ftrace_bug(ret, ip);
67306 - return 0;
67307 }
67308 - return 1;
67309 + return ret ? 0 : 1;
67310 }
67311
67312 /*
67313 @@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
67314
67315 int
67316 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
67317 - void *data)
67318 + void *data)
67319 {
67320 struct ftrace_func_probe *entry;
67321 struct ftrace_page *pg;
67322 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
67323 index f2bd275..adaf3a2 100644
67324 --- a/kernel/trace/trace.c
67325 +++ b/kernel/trace/trace.c
67326 @@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = {
67327 };
67328 #endif
67329
67330 -static struct dentry *d_tracer;
67331 -
67332 struct dentry *tracing_init_dentry(void)
67333 {
67334 + static struct dentry *d_tracer;
67335 static int once;
67336
67337 if (d_tracer)
67338 @@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void)
67339 return d_tracer;
67340 }
67341
67342 -static struct dentry *d_percpu;
67343 -
67344 struct dentry *tracing_dentry_percpu(void)
67345 {
67346 + static struct dentry *d_percpu;
67347 static int once;
67348 struct dentry *d_tracer;
67349
67350 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
67351 index c212a7f..7b02394 100644
67352 --- a/kernel/trace/trace_events.c
67353 +++ b/kernel/trace/trace_events.c
67354 @@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
67355 struct ftrace_module_file_ops {
67356 struct list_head list;
67357 struct module *mod;
67358 - struct file_operations id;
67359 - struct file_operations enable;
67360 - struct file_operations format;
67361 - struct file_operations filter;
67362 };
67363
67364 static struct ftrace_module_file_ops *
67365 @@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
67366
67367 file_ops->mod = mod;
67368
67369 - file_ops->id = ftrace_event_id_fops;
67370 - file_ops->id.owner = mod;
67371 -
67372 - file_ops->enable = ftrace_enable_fops;
67373 - file_ops->enable.owner = mod;
67374 -
67375 - file_ops->filter = ftrace_event_filter_fops;
67376 - file_ops->filter.owner = mod;
67377 -
67378 - file_ops->format = ftrace_event_format_fops;
67379 - file_ops->format.owner = mod;
67380 + pax_open_kernel();
67381 + *(void **)&mod->trace_id.owner = mod;
67382 + *(void **)&mod->trace_enable.owner = mod;
67383 + *(void **)&mod->trace_filter.owner = mod;
67384 + *(void **)&mod->trace_format.owner = mod;
67385 + pax_close_kernel();
67386
67387 list_add(&file_ops->list, &ftrace_module_file_list);
67388
67389 @@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
67390
67391 for_each_event(call, start, end) {
67392 __trace_add_event_call(*call, mod,
67393 - &file_ops->id, &file_ops->enable,
67394 - &file_ops->filter, &file_ops->format);
67395 + &mod->trace_id, &mod->trace_enable,
67396 + &mod->trace_filter, &mod->trace_format);
67397 }
67398 }
67399
67400 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
67401 index 00d527c..7c5b1a3 100644
67402 --- a/kernel/trace/trace_kprobe.c
67403 +++ b/kernel/trace/trace_kprobe.c
67404 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67405 long ret;
67406 int maxlen = get_rloc_len(*(u32 *)dest);
67407 u8 *dst = get_rloc_data(dest);
67408 - u8 *src = addr;
67409 + const u8 __user *src = (const u8 __force_user *)addr;
67410 mm_segment_t old_fs = get_fs();
67411 if (!maxlen)
67412 return;
67413 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67414 pagefault_disable();
67415 do
67416 ret = __copy_from_user_inatomic(dst++, src++, 1);
67417 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
67418 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
67419 dst[-1] = '\0';
67420 pagefault_enable();
67421 set_fs(old_fs);
67422 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67423 ((u8 *)get_rloc_data(dest))[0] = '\0';
67424 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
67425 } else
67426 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
67427 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
67428 get_rloc_offs(*(u32 *)dest));
67429 }
67430 /* Return the length of string -- including null terminal byte */
67431 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
67432 set_fs(KERNEL_DS);
67433 pagefault_disable();
67434 do {
67435 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
67436 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
67437 len++;
67438 } while (c && ret == 0 && len < MAX_STRING_SIZE);
67439 pagefault_enable();
67440 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
67441 index fd3c8aa..5f324a6 100644
67442 --- a/kernel/trace/trace_mmiotrace.c
67443 +++ b/kernel/trace/trace_mmiotrace.c
67444 @@ -24,7 +24,7 @@ struct header_iter {
67445 static struct trace_array *mmio_trace_array;
67446 static bool overrun_detected;
67447 static unsigned long prev_overruns;
67448 -static atomic_t dropped_count;
67449 +static atomic_unchecked_t dropped_count;
67450
67451 static void mmio_reset_data(struct trace_array *tr)
67452 {
67453 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
67454
67455 static unsigned long count_overruns(struct trace_iterator *iter)
67456 {
67457 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
67458 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
67459 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
67460
67461 if (over > prev_overruns)
67462 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
67463 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
67464 sizeof(*entry), 0, pc);
67465 if (!event) {
67466 - atomic_inc(&dropped_count);
67467 + atomic_inc_unchecked(&dropped_count);
67468 return;
67469 }
67470 entry = ring_buffer_event_data(event);
67471 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
67472 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
67473 sizeof(*entry), 0, pc);
67474 if (!event) {
67475 - atomic_inc(&dropped_count);
67476 + atomic_inc_unchecked(&dropped_count);
67477 return;
67478 }
67479 entry = ring_buffer_event_data(event);
67480 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
67481 index 5199930..26c73a0 100644
67482 --- a/kernel/trace/trace_output.c
67483 +++ b/kernel/trace/trace_output.c
67484 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
67485
67486 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
67487 if (!IS_ERR(p)) {
67488 - p = mangle_path(s->buffer + s->len, p, "\n");
67489 + p = mangle_path(s->buffer + s->len, p, "\n\\");
67490 if (p) {
67491 s->len = p - s->buffer;
67492 return 1;
67493 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
67494 index 77575b3..6e623d1 100644
67495 --- a/kernel/trace/trace_stack.c
67496 +++ b/kernel/trace/trace_stack.c
67497 @@ -50,7 +50,7 @@ static inline void check_stack(void)
67498 return;
67499
67500 /* we do not handle interrupt stacks yet */
67501 - if (!object_is_on_stack(&this_size))
67502 + if (!object_starts_on_stack(&this_size))
67503 return;
67504
67505 local_irq_save(flags);
67506 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
67507 index 209b379..7f76423 100644
67508 --- a/kernel/trace/trace_workqueue.c
67509 +++ b/kernel/trace/trace_workqueue.c
67510 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
67511 int cpu;
67512 pid_t pid;
67513 /* Can be inserted from interrupt or user context, need to be atomic */
67514 - atomic_t inserted;
67515 + atomic_unchecked_t inserted;
67516 /*
67517 * Don't need to be atomic, works are serialized in a single workqueue thread
67518 * on a single CPU.
67519 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
67520 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
67521 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
67522 if (node->pid == wq_thread->pid) {
67523 - atomic_inc(&node->inserted);
67524 + atomic_inc_unchecked(&node->inserted);
67525 goto found;
67526 }
67527 }
67528 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
67529 tsk = get_pid_task(pid, PIDTYPE_PID);
67530 if (tsk) {
67531 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
67532 - atomic_read(&cws->inserted), cws->executed,
67533 + atomic_read_unchecked(&cws->inserted), cws->executed,
67534 tsk->comm);
67535 put_task_struct(tsk);
67536 }
67537 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
67538 index 82928f5..92da771 100644
67539 --- a/lib/Kconfig.debug
67540 +++ b/lib/Kconfig.debug
67541 @@ -1103,6 +1103,7 @@ config LATENCYTOP
67542 depends on DEBUG_KERNEL
67543 depends on STACKTRACE_SUPPORT
67544 depends on PROC_FS
67545 + depends on !GRKERNSEC_HIDESYM
67546 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
67547 select KALLSYMS
67548 select KALLSYMS_ALL
67549 diff --git a/lib/bitmap.c b/lib/bitmap.c
67550 index 0d4a127..33a06c7 100644
67551 --- a/lib/bitmap.c
67552 +++ b/lib/bitmap.c
67553 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
67554 {
67555 int c, old_c, totaldigits, ndigits, nchunks, nbits;
67556 u32 chunk;
67557 - const char __user __force *ubuf = (const char __user __force *)buf;
67558 + const char __user *ubuf = (const char __force_user *)buf;
67559
67560 bitmap_zero(maskp, nmaskbits);
67561
67562 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
67563 {
67564 if (!access_ok(VERIFY_READ, ubuf, ulen))
67565 return -EFAULT;
67566 - return __bitmap_parse((const char __force *)ubuf,
67567 + return __bitmap_parse((const char __force_kernel *)ubuf,
67568 ulen, 1, maskp, nmaskbits);
67569
67570 }
67571 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
67572 {
67573 unsigned a, b;
67574 int c, old_c, totaldigits;
67575 - const char __user __force *ubuf = (const char __user __force *)buf;
67576 + const char __user *ubuf = (const char __force_user *)buf;
67577 int exp_digit, in_range;
67578
67579 totaldigits = c = 0;
67580 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
67581 {
67582 if (!access_ok(VERIFY_READ, ubuf, ulen))
67583 return -EFAULT;
67584 - return __bitmap_parselist((const char __force *)ubuf,
67585 + return __bitmap_parselist((const char __force_kernel *)ubuf,
67586 ulen, 1, maskp, nmaskbits);
67587 }
67588 EXPORT_SYMBOL(bitmap_parselist_user);
67589 diff --git a/lib/bug.c b/lib/bug.c
67590 index 1955209..cbbb2ad 100644
67591 --- a/lib/bug.c
67592 +++ b/lib/bug.c
67593 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
67594 return BUG_TRAP_TYPE_NONE;
67595
67596 bug = find_bug(bugaddr);
67597 + if (!bug)
67598 + return BUG_TRAP_TYPE_NONE;
67599
67600 file = NULL;
67601 line = 0;
67602 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
67603 index a78b7c6..2c73084 100644
67604 --- a/lib/debugobjects.c
67605 +++ b/lib/debugobjects.c
67606 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
67607 if (limit > 4)
67608 return;
67609
67610 - is_on_stack = object_is_on_stack(addr);
67611 + is_on_stack = object_starts_on_stack(addr);
67612 if (is_on_stack == onstack)
67613 return;
67614
67615 diff --git a/lib/devres.c b/lib/devres.c
67616 index 7c0e953..f642b5c 100644
67617 --- a/lib/devres.c
67618 +++ b/lib/devres.c
67619 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
67620 void devm_iounmap(struct device *dev, void __iomem *addr)
67621 {
67622 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
67623 - (void *)addr));
67624 + (void __force *)addr));
67625 iounmap(addr);
67626 }
67627 EXPORT_SYMBOL(devm_iounmap);
67628 @@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
67629 {
67630 ioport_unmap(addr);
67631 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
67632 - devm_ioport_map_match, (void *)addr));
67633 + devm_ioport_map_match, (void __force *)addr));
67634 }
67635 EXPORT_SYMBOL(devm_ioport_unmap);
67636
67637 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
67638 index fea790a..ebb0e82 100644
67639 --- a/lib/dma-debug.c
67640 +++ b/lib/dma-debug.c
67641 @@ -925,7 +925,7 @@ out:
67642
67643 static void check_for_stack(struct device *dev, void *addr)
67644 {
67645 - if (object_is_on_stack(addr))
67646 + if (object_starts_on_stack(addr))
67647 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67648 "stack [addr=%p]\n", addr);
67649 }
67650 diff --git a/lib/extable.c b/lib/extable.c
67651 index 4cac81e..63e9b8f 100644
67652 --- a/lib/extable.c
67653 +++ b/lib/extable.c
67654 @@ -13,6 +13,7 @@
67655 #include <linux/init.h>
67656 #include <linux/sort.h>
67657 #include <asm/uaccess.h>
67658 +#include <asm/pgtable.h>
67659
67660 #ifndef ARCH_HAS_SORT_EXTABLE
67661 /*
67662 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
67663 void sort_extable(struct exception_table_entry *start,
67664 struct exception_table_entry *finish)
67665 {
67666 + pax_open_kernel();
67667 sort(start, finish - start, sizeof(struct exception_table_entry),
67668 cmp_ex, NULL);
67669 + pax_close_kernel();
67670 }
67671
67672 #ifdef CONFIG_MODULES
67673 diff --git a/lib/inflate.c b/lib/inflate.c
67674 index 013a761..c28f3fc 100644
67675 --- a/lib/inflate.c
67676 +++ b/lib/inflate.c
67677 @@ -269,7 +269,7 @@ static void free(void *where)
67678 malloc_ptr = free_mem_ptr;
67679 }
67680 #else
67681 -#define malloc(a) kmalloc(a, GFP_KERNEL)
67682 +#define malloc(a) kmalloc((a), GFP_KERNEL)
67683 #define free(a) kfree(a)
67684 #endif
67685
67686 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
67687 index bd2bea9..6b3c95e 100644
67688 --- a/lib/is_single_threaded.c
67689 +++ b/lib/is_single_threaded.c
67690 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
67691 struct task_struct *p, *t;
67692 bool ret;
67693
67694 + if (!mm)
67695 + return true;
67696 +
67697 if (atomic_read(&task->signal->live) != 1)
67698 return false;
67699
67700 diff --git a/lib/kref.c b/lib/kref.c
67701 index 3efb882..8492f4c 100644
67702 --- a/lib/kref.c
67703 +++ b/lib/kref.c
67704 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
67705 */
67706 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67707 {
67708 - WARN_ON(release == NULL);
67709 + BUG_ON(release == NULL);
67710 WARN_ON(release == (void (*)(struct kref *))kfree);
67711
67712 if (atomic_dec_and_test(&kref->refcount)) {
67713 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
67714 index d9df745..e73c2fe 100644
67715 --- a/lib/radix-tree.c
67716 +++ b/lib/radix-tree.c
67717 @@ -80,7 +80,7 @@ struct radix_tree_preload {
67718 int nr;
67719 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67720 };
67721 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67722 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67723
67724 static inline void *ptr_to_indirect(void *ptr)
67725 {
67726 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
67727 index 993599e..84dc70e 100644
67728 --- a/lib/vsprintf.c
67729 +++ b/lib/vsprintf.c
67730 @@ -16,6 +16,9 @@
67731 * - scnprintf and vscnprintf
67732 */
67733
67734 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67735 +#define __INCLUDED_BY_HIDESYM 1
67736 +#endif
67737 #include <stdarg.h>
67738 #include <linux/module.h>
67739 #include <linux/types.h>
67740 @@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
67741 char sym[KSYM_SYMBOL_LEN];
67742 if (ext == 'B')
67743 sprint_backtrace(sym, value);
67744 - else if (ext != 'f' && ext != 's')
67745 + else if (ext != 'f' && ext != 's' && ext != 'a')
67746 sprint_symbol(sym, value);
67747 else
67748 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67749 @@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
67750 return string(buf, end, uuid, spec);
67751 }
67752
67753 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67754 +int kptr_restrict __read_mostly = 2;
67755 +#else
67756 int kptr_restrict __read_mostly;
67757 +#endif
67758
67759 /*
67760 * Show a '%p' thing. A kernel extension is that the '%p' is followed
67761 @@ -791,6 +798,8 @@ int kptr_restrict __read_mostly;
67762 * - 'S' For symbolic direct pointers with offset
67763 * - 's' For symbolic direct pointers without offset
67764 * - 'B' For backtraced symbolic direct pointers with offset
67765 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67766 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67767 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
67768 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
67769 * - 'M' For a 6-byte MAC address, it prints the address in the
67770 @@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67771 {
67772 if (!ptr && *fmt != 'K') {
67773 /*
67774 - * Print (null) with the same width as a pointer so it makes
67775 + * Print (nil) with the same width as a pointer so it makes
67776 * tabular output look nice.
67777 */
67778 if (spec.field_width == -1)
67779 spec.field_width = 2 * sizeof(void *);
67780 - return string(buf, end, "(null)", spec);
67781 + return string(buf, end, "(nil)", spec);
67782 }
67783
67784 switch (*fmt) {
67785 @@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67786 /* Fallthrough */
67787 case 'S':
67788 case 's':
67789 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67790 + break;
67791 +#else
67792 + return symbol_string(buf, end, ptr, spec, *fmt);
67793 +#endif
67794 + case 'A':
67795 + case 'a':
67796 case 'B':
67797 return symbol_string(buf, end, ptr, spec, *fmt);
67798 case 'R':
67799 @@ -1608,11 +1624,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67800 typeof(type) value; \
67801 if (sizeof(type) == 8) { \
67802 args = PTR_ALIGN(args, sizeof(u32)); \
67803 - *(u32 *)&value = *(u32 *)args; \
67804 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67805 + *(u32 *)&value = *(const u32 *)args; \
67806 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67807 } else { \
67808 args = PTR_ALIGN(args, sizeof(type)); \
67809 - value = *(typeof(type) *)args; \
67810 + value = *(const typeof(type) *)args; \
67811 } \
67812 args += sizeof(type); \
67813 value; \
67814 @@ -1675,7 +1691,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67815 case FORMAT_TYPE_STR: {
67816 const char *str_arg = args;
67817 args += strlen(str_arg) + 1;
67818 - str = string(str, end, (char *)str_arg, spec);
67819 + str = string(str, end, str_arg, spec);
67820 break;
67821 }
67822
67823 diff --git a/localversion-grsec b/localversion-grsec
67824 new file mode 100644
67825 index 0000000..7cd6065
67826 --- /dev/null
67827 +++ b/localversion-grsec
67828 @@ -0,0 +1 @@
67829 +-grsec
67830 diff --git a/mm/Kconfig b/mm/Kconfig
67831 index 011b110..b492af2 100644
67832 --- a/mm/Kconfig
67833 +++ b/mm/Kconfig
67834 @@ -241,10 +241,10 @@ config KSM
67835 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67836
67837 config DEFAULT_MMAP_MIN_ADDR
67838 - int "Low address space to protect from user allocation"
67839 + int "Low address space to protect from user allocation"
67840 depends on MMU
67841 - default 4096
67842 - help
67843 + default 65536
67844 + help
67845 This is the portion of low virtual memory which should be protected
67846 from userspace allocation. Keeping a user from writing to low pages
67847 can help reduce the impact of kernel NULL pointer bugs.
67848 diff --git a/mm/filemap.c b/mm/filemap.c
67849 index 03c5b0e..a01e793 100644
67850 --- a/mm/filemap.c
67851 +++ b/mm/filemap.c
67852 @@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
67853 struct address_space *mapping = file->f_mapping;
67854
67855 if (!mapping->a_ops->readpage)
67856 - return -ENOEXEC;
67857 + return -ENODEV;
67858 file_accessed(file);
67859 vma->vm_ops = &generic_file_vm_ops;
67860 vma->vm_flags |= VM_CAN_NONLINEAR;
67861 @@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
67862 *pos = i_size_read(inode);
67863
67864 if (limit != RLIM_INFINITY) {
67865 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67866 if (*pos >= limit) {
67867 send_sig(SIGXFSZ, current, 0);
67868 return -EFBIG;
67869 diff --git a/mm/fremap.c b/mm/fremap.c
67870 index 9ed4fd4..c42648d 100644
67871 --- a/mm/fremap.c
67872 +++ b/mm/fremap.c
67873 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
67874 retry:
67875 vma = find_vma(mm, start);
67876
67877 +#ifdef CONFIG_PAX_SEGMEXEC
67878 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67879 + goto out;
67880 +#endif
67881 +
67882 /*
67883 * Make sure the vma is shared, that it supports prefaulting,
67884 * and that the remapped range is valid and fully within
67885 diff --git a/mm/highmem.c b/mm/highmem.c
67886 index 57d82c6..e9e0552 100644
67887 --- a/mm/highmem.c
67888 +++ b/mm/highmem.c
67889 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67890 * So no dangers, even with speculative execution.
67891 */
67892 page = pte_page(pkmap_page_table[i]);
67893 + pax_open_kernel();
67894 pte_clear(&init_mm, (unsigned long)page_address(page),
67895 &pkmap_page_table[i]);
67896 -
67897 + pax_close_kernel();
67898 set_page_address(page, NULL);
67899 need_flush = 1;
67900 }
67901 @@ -186,9 +187,11 @@ start:
67902 }
67903 }
67904 vaddr = PKMAP_ADDR(last_pkmap_nr);
67905 +
67906 + pax_open_kernel();
67907 set_pte_at(&init_mm, vaddr,
67908 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67909 -
67910 + pax_close_kernel();
67911 pkmap_count[last_pkmap_nr] = 1;
67912 set_page_address(page, (void *)vaddr);
67913
67914 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
67915 index 33141f5..e56bef9 100644
67916 --- a/mm/huge_memory.c
67917 +++ b/mm/huge_memory.c
67918 @@ -703,7 +703,7 @@ out:
67919 * run pte_offset_map on the pmd, if an huge pmd could
67920 * materialize from under us from a different thread.
67921 */
67922 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67923 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67924 return VM_FAULT_OOM;
67925 /* if an huge pmd materialized from under us just retry later */
67926 if (unlikely(pmd_trans_huge(*pmd)))
67927 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
67928 index 2316840..b418671 100644
67929 --- a/mm/hugetlb.c
67930 +++ b/mm/hugetlb.c
67931 @@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
67932 return 1;
67933 }
67934
67935 +#ifdef CONFIG_PAX_SEGMEXEC
67936 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67937 +{
67938 + struct mm_struct *mm = vma->vm_mm;
67939 + struct vm_area_struct *vma_m;
67940 + unsigned long address_m;
67941 + pte_t *ptep_m;
67942 +
67943 + vma_m = pax_find_mirror_vma(vma);
67944 + if (!vma_m)
67945 + return;
67946 +
67947 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67948 + address_m = address + SEGMEXEC_TASK_SIZE;
67949 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67950 + get_page(page_m);
67951 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
67952 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67953 +}
67954 +#endif
67955 +
67956 /*
67957 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67958 */
67959 @@ -2450,6 +2471,11 @@ retry_avoidcopy:
67960 make_huge_pte(vma, new_page, 1));
67961 page_remove_rmap(old_page);
67962 hugepage_add_new_anon_rmap(new_page, vma, address);
67963 +
67964 +#ifdef CONFIG_PAX_SEGMEXEC
67965 + pax_mirror_huge_pte(vma, address, new_page);
67966 +#endif
67967 +
67968 /* Make the old page be freed below */
67969 new_page = old_page;
67970 mmu_notifier_invalidate_range_end(mm,
67971 @@ -2601,6 +2627,10 @@ retry:
67972 && (vma->vm_flags & VM_SHARED)));
67973 set_huge_pte_at(mm, address, ptep, new_pte);
67974
67975 +#ifdef CONFIG_PAX_SEGMEXEC
67976 + pax_mirror_huge_pte(vma, address, page);
67977 +#endif
67978 +
67979 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67980 /* Optimization, do the COW without a second fault */
67981 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67982 @@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67983 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67984 struct hstate *h = hstate_vma(vma);
67985
67986 +#ifdef CONFIG_PAX_SEGMEXEC
67987 + struct vm_area_struct *vma_m;
67988 +#endif
67989 +
67990 ptep = huge_pte_offset(mm, address);
67991 if (ptep) {
67992 entry = huge_ptep_get(ptep);
67993 @@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67994 VM_FAULT_SET_HINDEX(h - hstates);
67995 }
67996
67997 +#ifdef CONFIG_PAX_SEGMEXEC
67998 + vma_m = pax_find_mirror_vma(vma);
67999 + if (vma_m) {
68000 + unsigned long address_m;
68001 +
68002 + if (vma->vm_start > vma_m->vm_start) {
68003 + address_m = address;
68004 + address -= SEGMEXEC_TASK_SIZE;
68005 + vma = vma_m;
68006 + h = hstate_vma(vma);
68007 + } else
68008 + address_m = address + SEGMEXEC_TASK_SIZE;
68009 +
68010 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
68011 + return VM_FAULT_OOM;
68012 + address_m &= HPAGE_MASK;
68013 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
68014 + }
68015 +#endif
68016 +
68017 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
68018 if (!ptep)
68019 return VM_FAULT_OOM;
68020 diff --git a/mm/internal.h b/mm/internal.h
68021 index 2189af4..f2ca332 100644
68022 --- a/mm/internal.h
68023 +++ b/mm/internal.h
68024 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
68025 * in mm/page_alloc.c
68026 */
68027 extern void __free_pages_bootmem(struct page *page, unsigned int order);
68028 +extern void free_compound_page(struct page *page);
68029 extern void prep_compound_page(struct page *page, unsigned long order);
68030 #ifdef CONFIG_MEMORY_FAILURE
68031 extern bool is_free_buddy_page(struct page *page);
68032 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
68033 index f3b2a00..61da94d 100644
68034 --- a/mm/kmemleak.c
68035 +++ b/mm/kmemleak.c
68036 @@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
68037
68038 for (i = 0; i < object->trace_len; i++) {
68039 void *ptr = (void *)object->trace[i];
68040 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
68041 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
68042 }
68043 }
68044
68045 diff --git a/mm/maccess.c b/mm/maccess.c
68046 index d53adf9..03a24bf 100644
68047 --- a/mm/maccess.c
68048 +++ b/mm/maccess.c
68049 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
68050 set_fs(KERNEL_DS);
68051 pagefault_disable();
68052 ret = __copy_from_user_inatomic(dst,
68053 - (__force const void __user *)src, size);
68054 + (const void __force_user *)src, size);
68055 pagefault_enable();
68056 set_fs(old_fs);
68057
68058 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
68059
68060 set_fs(KERNEL_DS);
68061 pagefault_disable();
68062 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
68063 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
68064 pagefault_enable();
68065 set_fs(old_fs);
68066
68067 diff --git a/mm/madvise.c b/mm/madvise.c
68068 index 74bf193..feb6fd3 100644
68069 --- a/mm/madvise.c
68070 +++ b/mm/madvise.c
68071 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
68072 pgoff_t pgoff;
68073 unsigned long new_flags = vma->vm_flags;
68074
68075 +#ifdef CONFIG_PAX_SEGMEXEC
68076 + struct vm_area_struct *vma_m;
68077 +#endif
68078 +
68079 switch (behavior) {
68080 case MADV_NORMAL:
68081 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
68082 @@ -110,6 +114,13 @@ success:
68083 /*
68084 * vm_flags is protected by the mmap_sem held in write mode.
68085 */
68086 +
68087 +#ifdef CONFIG_PAX_SEGMEXEC
68088 + vma_m = pax_find_mirror_vma(vma);
68089 + if (vma_m)
68090 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
68091 +#endif
68092 +
68093 vma->vm_flags = new_flags;
68094
68095 out:
68096 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
68097 struct vm_area_struct ** prev,
68098 unsigned long start, unsigned long end)
68099 {
68100 +
68101 +#ifdef CONFIG_PAX_SEGMEXEC
68102 + struct vm_area_struct *vma_m;
68103 +#endif
68104 +
68105 *prev = vma;
68106 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
68107 return -EINVAL;
68108 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
68109 zap_page_range(vma, start, end - start, &details);
68110 } else
68111 zap_page_range(vma, start, end - start, NULL);
68112 +
68113 +#ifdef CONFIG_PAX_SEGMEXEC
68114 + vma_m = pax_find_mirror_vma(vma);
68115 + if (vma_m) {
68116 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
68117 + struct zap_details details = {
68118 + .nonlinear_vma = vma_m,
68119 + .last_index = ULONG_MAX,
68120 + };
68121 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
68122 + } else
68123 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
68124 + }
68125 +#endif
68126 +
68127 return 0;
68128 }
68129
68130 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
68131 if (end < start)
68132 goto out;
68133
68134 +#ifdef CONFIG_PAX_SEGMEXEC
68135 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
68136 + if (end > SEGMEXEC_TASK_SIZE)
68137 + goto out;
68138 + } else
68139 +#endif
68140 +
68141 + if (end > TASK_SIZE)
68142 + goto out;
68143 +
68144 error = 0;
68145 if (end == start)
68146 goto out;
68147 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
68148 index 06d3479..0778eef 100644
68149 --- a/mm/memory-failure.c
68150 +++ b/mm/memory-failure.c
68151 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
68152
68153 int sysctl_memory_failure_recovery __read_mostly = 1;
68154
68155 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68156 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68157
68158 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
68159
68160 @@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
68161 si.si_signo = SIGBUS;
68162 si.si_errno = 0;
68163 si.si_code = BUS_MCEERR_AO;
68164 - si.si_addr = (void *)addr;
68165 + si.si_addr = (void __user *)addr;
68166 #ifdef __ARCH_SI_TRAPNO
68167 si.si_trapno = trapno;
68168 #endif
68169 @@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
68170 }
68171
68172 nr_pages = 1 << compound_trans_order(hpage);
68173 - atomic_long_add(nr_pages, &mce_bad_pages);
68174 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
68175
68176 /*
68177 * We need/can do nothing about count=0 pages.
68178 @@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
68179 if (!PageHWPoison(hpage)
68180 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
68181 || (p != hpage && TestSetPageHWPoison(hpage))) {
68182 - atomic_long_sub(nr_pages, &mce_bad_pages);
68183 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68184 return 0;
68185 }
68186 set_page_hwpoison_huge_page(hpage);
68187 @@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
68188 }
68189 if (hwpoison_filter(p)) {
68190 if (TestClearPageHWPoison(p))
68191 - atomic_long_sub(nr_pages, &mce_bad_pages);
68192 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68193 unlock_page(hpage);
68194 put_page(hpage);
68195 return 0;
68196 @@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
68197 return 0;
68198 }
68199 if (TestClearPageHWPoison(p))
68200 - atomic_long_sub(nr_pages, &mce_bad_pages);
68201 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68202 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
68203 return 0;
68204 }
68205 @@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
68206 */
68207 if (TestClearPageHWPoison(page)) {
68208 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
68209 - atomic_long_sub(nr_pages, &mce_bad_pages);
68210 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68211 freeit = 1;
68212 if (PageHuge(page))
68213 clear_page_hwpoison_huge_page(page);
68214 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
68215 }
68216 done:
68217 if (!PageHWPoison(hpage))
68218 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
68219 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
68220 set_page_hwpoison_huge_page(hpage);
68221 dequeue_hwpoisoned_huge_page(hpage);
68222 /* keep elevated page count for bad page */
68223 @@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
68224 return ret;
68225
68226 done:
68227 - atomic_long_add(1, &mce_bad_pages);
68228 + atomic_long_add_unchecked(1, &mce_bad_pages);
68229 SetPageHWPoison(page);
68230 /* keep elevated page count for bad page */
68231 return ret;
68232 diff --git a/mm/memory.c b/mm/memory.c
68233 index 829d437..3d3926a 100644
68234 --- a/mm/memory.c
68235 +++ b/mm/memory.c
68236 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
68237 return;
68238
68239 pmd = pmd_offset(pud, start);
68240 +
68241 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
68242 pud_clear(pud);
68243 pmd_free_tlb(tlb, pmd, start);
68244 +#endif
68245 +
68246 }
68247
68248 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
68249 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
68250 if (end - 1 > ceiling - 1)
68251 return;
68252
68253 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
68254 pud = pud_offset(pgd, start);
68255 pgd_clear(pgd);
68256 pud_free_tlb(tlb, pud, start);
68257 +#endif
68258 +
68259 }
68260
68261 /*
68262 @@ -1566,12 +1573,6 @@ no_page_table:
68263 return page;
68264 }
68265
68266 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
68267 -{
68268 - return stack_guard_page_start(vma, addr) ||
68269 - stack_guard_page_end(vma, addr+PAGE_SIZE);
68270 -}
68271 -
68272 /**
68273 * __get_user_pages() - pin user pages in memory
68274 * @tsk: task_struct of target task
68275 @@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68276 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
68277 i = 0;
68278
68279 - do {
68280 + while (nr_pages) {
68281 struct vm_area_struct *vma;
68282
68283 - vma = find_extend_vma(mm, start);
68284 + vma = find_vma(mm, start);
68285 if (!vma && in_gate_area(mm, start)) {
68286 unsigned long pg = start & PAGE_MASK;
68287 pgd_t *pgd;
68288 @@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68289 goto next_page;
68290 }
68291
68292 - if (!vma ||
68293 + if (!vma || start < vma->vm_start ||
68294 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
68295 !(vm_flags & vma->vm_flags))
68296 return i ? : -EFAULT;
68297 @@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68298 int ret;
68299 unsigned int fault_flags = 0;
68300
68301 - /* For mlock, just skip the stack guard page. */
68302 - if (foll_flags & FOLL_MLOCK) {
68303 - if (stack_guard_page(vma, start))
68304 - goto next_page;
68305 - }
68306 if (foll_flags & FOLL_WRITE)
68307 fault_flags |= FAULT_FLAG_WRITE;
68308 if (nonblocking)
68309 @@ -1800,7 +1796,7 @@ next_page:
68310 start += PAGE_SIZE;
68311 nr_pages--;
68312 } while (nr_pages && start < vma->vm_end);
68313 - } while (nr_pages);
68314 + }
68315 return i;
68316 }
68317 EXPORT_SYMBOL(__get_user_pages);
68318 @@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
68319 page_add_file_rmap(page);
68320 set_pte_at(mm, addr, pte, mk_pte(page, prot));
68321
68322 +#ifdef CONFIG_PAX_SEGMEXEC
68323 + pax_mirror_file_pte(vma, addr, page, ptl);
68324 +#endif
68325 +
68326 retval = 0;
68327 pte_unmap_unlock(pte, ptl);
68328 return retval;
68329 @@ -2041,10 +2041,22 @@ out:
68330 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
68331 struct page *page)
68332 {
68333 +
68334 +#ifdef CONFIG_PAX_SEGMEXEC
68335 + struct vm_area_struct *vma_m;
68336 +#endif
68337 +
68338 if (addr < vma->vm_start || addr >= vma->vm_end)
68339 return -EFAULT;
68340 if (!page_count(page))
68341 return -EINVAL;
68342 +
68343 +#ifdef CONFIG_PAX_SEGMEXEC
68344 + vma_m = pax_find_mirror_vma(vma);
68345 + if (vma_m)
68346 + vma_m->vm_flags |= VM_INSERTPAGE;
68347 +#endif
68348 +
68349 vma->vm_flags |= VM_INSERTPAGE;
68350 return insert_page(vma, addr, page, vma->vm_page_prot);
68351 }
68352 @@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
68353 unsigned long pfn)
68354 {
68355 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
68356 + BUG_ON(vma->vm_mirror);
68357
68358 if (addr < vma->vm_start || addr >= vma->vm_end)
68359 return -EFAULT;
68360 @@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
68361 copy_user_highpage(dst, src, va, vma);
68362 }
68363
68364 +#ifdef CONFIG_PAX_SEGMEXEC
68365 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
68366 +{
68367 + struct mm_struct *mm = vma->vm_mm;
68368 + spinlock_t *ptl;
68369 + pte_t *pte, entry;
68370 +
68371 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
68372 + entry = *pte;
68373 + if (!pte_present(entry)) {
68374 + if (!pte_none(entry)) {
68375 + BUG_ON(pte_file(entry));
68376 + free_swap_and_cache(pte_to_swp_entry(entry));
68377 + pte_clear_not_present_full(mm, address, pte, 0);
68378 + }
68379 + } else {
68380 + struct page *page;
68381 +
68382 + flush_cache_page(vma, address, pte_pfn(entry));
68383 + entry = ptep_clear_flush(vma, address, pte);
68384 + BUG_ON(pte_dirty(entry));
68385 + page = vm_normal_page(vma, address, entry);
68386 + if (page) {
68387 + update_hiwater_rss(mm);
68388 + if (PageAnon(page))
68389 + dec_mm_counter_fast(mm, MM_ANONPAGES);
68390 + else
68391 + dec_mm_counter_fast(mm, MM_FILEPAGES);
68392 + page_remove_rmap(page);
68393 + page_cache_release(page);
68394 + }
68395 + }
68396 + pte_unmap_unlock(pte, ptl);
68397 +}
68398 +
68399 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
68400 + *
68401 + * the ptl of the lower mapped page is held on entry and is not released on exit
68402 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
68403 + */
68404 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68405 +{
68406 + struct mm_struct *mm = vma->vm_mm;
68407 + unsigned long address_m;
68408 + spinlock_t *ptl_m;
68409 + struct vm_area_struct *vma_m;
68410 + pmd_t *pmd_m;
68411 + pte_t *pte_m, entry_m;
68412 +
68413 + BUG_ON(!page_m || !PageAnon(page_m));
68414 +
68415 + vma_m = pax_find_mirror_vma(vma);
68416 + if (!vma_m)
68417 + return;
68418 +
68419 + BUG_ON(!PageLocked(page_m));
68420 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68421 + address_m = address + SEGMEXEC_TASK_SIZE;
68422 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68423 + pte_m = pte_offset_map(pmd_m, address_m);
68424 + ptl_m = pte_lockptr(mm, pmd_m);
68425 + if (ptl != ptl_m) {
68426 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68427 + if (!pte_none(*pte_m))
68428 + goto out;
68429 + }
68430 +
68431 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68432 + page_cache_get(page_m);
68433 + page_add_anon_rmap(page_m, vma_m, address_m);
68434 + inc_mm_counter_fast(mm, MM_ANONPAGES);
68435 + set_pte_at(mm, address_m, pte_m, entry_m);
68436 + update_mmu_cache(vma_m, address_m, entry_m);
68437 +out:
68438 + if (ptl != ptl_m)
68439 + spin_unlock(ptl_m);
68440 + pte_unmap(pte_m);
68441 + unlock_page(page_m);
68442 +}
68443 +
68444 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68445 +{
68446 + struct mm_struct *mm = vma->vm_mm;
68447 + unsigned long address_m;
68448 + spinlock_t *ptl_m;
68449 + struct vm_area_struct *vma_m;
68450 + pmd_t *pmd_m;
68451 + pte_t *pte_m, entry_m;
68452 +
68453 + BUG_ON(!page_m || PageAnon(page_m));
68454 +
68455 + vma_m = pax_find_mirror_vma(vma);
68456 + if (!vma_m)
68457 + return;
68458 +
68459 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68460 + address_m = address + SEGMEXEC_TASK_SIZE;
68461 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68462 + pte_m = pte_offset_map(pmd_m, address_m);
68463 + ptl_m = pte_lockptr(mm, pmd_m);
68464 + if (ptl != ptl_m) {
68465 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68466 + if (!pte_none(*pte_m))
68467 + goto out;
68468 + }
68469 +
68470 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68471 + page_cache_get(page_m);
68472 + page_add_file_rmap(page_m);
68473 + inc_mm_counter_fast(mm, MM_FILEPAGES);
68474 + set_pte_at(mm, address_m, pte_m, entry_m);
68475 + update_mmu_cache(vma_m, address_m, entry_m);
68476 +out:
68477 + if (ptl != ptl_m)
68478 + spin_unlock(ptl_m);
68479 + pte_unmap(pte_m);
68480 +}
68481 +
68482 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
68483 +{
68484 + struct mm_struct *mm = vma->vm_mm;
68485 + unsigned long address_m;
68486 + spinlock_t *ptl_m;
68487 + struct vm_area_struct *vma_m;
68488 + pmd_t *pmd_m;
68489 + pte_t *pte_m, entry_m;
68490 +
68491 + vma_m = pax_find_mirror_vma(vma);
68492 + if (!vma_m)
68493 + return;
68494 +
68495 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68496 + address_m = address + SEGMEXEC_TASK_SIZE;
68497 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68498 + pte_m = pte_offset_map(pmd_m, address_m);
68499 + ptl_m = pte_lockptr(mm, pmd_m);
68500 + if (ptl != ptl_m) {
68501 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68502 + if (!pte_none(*pte_m))
68503 + goto out;
68504 + }
68505 +
68506 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
68507 + set_pte_at(mm, address_m, pte_m, entry_m);
68508 +out:
68509 + if (ptl != ptl_m)
68510 + spin_unlock(ptl_m);
68511 + pte_unmap(pte_m);
68512 +}
68513 +
68514 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
68515 +{
68516 + struct page *page_m;
68517 + pte_t entry;
68518 +
68519 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
68520 + goto out;
68521 +
68522 + entry = *pte;
68523 + page_m = vm_normal_page(vma, address, entry);
68524 + if (!page_m)
68525 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
68526 + else if (PageAnon(page_m)) {
68527 + if (pax_find_mirror_vma(vma)) {
68528 + pte_unmap_unlock(pte, ptl);
68529 + lock_page(page_m);
68530 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
68531 + if (pte_same(entry, *pte))
68532 + pax_mirror_anon_pte(vma, address, page_m, ptl);
68533 + else
68534 + unlock_page(page_m);
68535 + }
68536 + } else
68537 + pax_mirror_file_pte(vma, address, page_m, ptl);
68538 +
68539 +out:
68540 + pte_unmap_unlock(pte, ptl);
68541 +}
68542 +#endif
68543 +
68544 /*
68545 * This routine handles present pages, when users try to write
68546 * to a shared page. It is done by copying the page to a new address
68547 @@ -2656,6 +2849,12 @@ gotten:
68548 */
68549 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68550 if (likely(pte_same(*page_table, orig_pte))) {
68551 +
68552 +#ifdef CONFIG_PAX_SEGMEXEC
68553 + if (pax_find_mirror_vma(vma))
68554 + BUG_ON(!trylock_page(new_page));
68555 +#endif
68556 +
68557 if (old_page) {
68558 if (!PageAnon(old_page)) {
68559 dec_mm_counter_fast(mm, MM_FILEPAGES);
68560 @@ -2707,6 +2906,10 @@ gotten:
68561 page_remove_rmap(old_page);
68562 }
68563
68564 +#ifdef CONFIG_PAX_SEGMEXEC
68565 + pax_mirror_anon_pte(vma, address, new_page, ptl);
68566 +#endif
68567 +
68568 /* Free the old page.. */
68569 new_page = old_page;
68570 ret |= VM_FAULT_WRITE;
68571 @@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68572 swap_free(entry);
68573 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68574 try_to_free_swap(page);
68575 +
68576 +#ifdef CONFIG_PAX_SEGMEXEC
68577 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68578 +#endif
68579 +
68580 unlock_page(page);
68581 if (swapcache) {
68582 /*
68583 @@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68584
68585 /* No need to invalidate - it was non-present before */
68586 update_mmu_cache(vma, address, page_table);
68587 +
68588 +#ifdef CONFIG_PAX_SEGMEXEC
68589 + pax_mirror_anon_pte(vma, address, page, ptl);
68590 +#endif
68591 +
68592 unlock:
68593 pte_unmap_unlock(page_table, ptl);
68594 out:
68595 @@ -3028,40 +3241,6 @@ out_release:
68596 }
68597
68598 /*
68599 - * This is like a special single-page "expand_{down|up}wards()",
68600 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
68601 - * doesn't hit another vma.
68602 - */
68603 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68604 -{
68605 - address &= PAGE_MASK;
68606 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68607 - struct vm_area_struct *prev = vma->vm_prev;
68608 -
68609 - /*
68610 - * Is there a mapping abutting this one below?
68611 - *
68612 - * That's only ok if it's the same stack mapping
68613 - * that has gotten split..
68614 - */
68615 - if (prev && prev->vm_end == address)
68616 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68617 -
68618 - expand_downwards(vma, address - PAGE_SIZE);
68619 - }
68620 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68621 - struct vm_area_struct *next = vma->vm_next;
68622 -
68623 - /* As VM_GROWSDOWN but s/below/above/ */
68624 - if (next && next->vm_start == address + PAGE_SIZE)
68625 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68626 -
68627 - expand_upwards(vma, address + PAGE_SIZE);
68628 - }
68629 - return 0;
68630 -}
68631 -
68632 -/*
68633 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68634 * but allow concurrent faults), and pte mapped but not yet locked.
68635 * We return with mmap_sem still held, but pte unmapped and unlocked.
68636 @@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68637 unsigned long address, pte_t *page_table, pmd_t *pmd,
68638 unsigned int flags)
68639 {
68640 - struct page *page;
68641 + struct page *page = NULL;
68642 spinlock_t *ptl;
68643 pte_t entry;
68644
68645 - pte_unmap(page_table);
68646 -
68647 - /* Check if we need to add a guard page to the stack */
68648 - if (check_stack_guard_page(vma, address) < 0)
68649 - return VM_FAULT_SIGBUS;
68650 -
68651 - /* Use the zero-page for reads */
68652 if (!(flags & FAULT_FLAG_WRITE)) {
68653 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68654 vma->vm_page_prot));
68655 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68656 + ptl = pte_lockptr(mm, pmd);
68657 + spin_lock(ptl);
68658 if (!pte_none(*page_table))
68659 goto unlock;
68660 goto setpte;
68661 }
68662
68663 /* Allocate our own private page. */
68664 + pte_unmap(page_table);
68665 +
68666 if (unlikely(anon_vma_prepare(vma)))
68667 goto oom;
68668 page = alloc_zeroed_user_highpage_movable(vma, address);
68669 @@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68670 if (!pte_none(*page_table))
68671 goto release;
68672
68673 +#ifdef CONFIG_PAX_SEGMEXEC
68674 + if (pax_find_mirror_vma(vma))
68675 + BUG_ON(!trylock_page(page));
68676 +#endif
68677 +
68678 inc_mm_counter_fast(mm, MM_ANONPAGES);
68679 page_add_new_anon_rmap(page, vma, address);
68680 setpte:
68681 @@ -3116,6 +3296,12 @@ setpte:
68682
68683 /* No need to invalidate - it was non-present before */
68684 update_mmu_cache(vma, address, page_table);
68685 +
68686 +#ifdef CONFIG_PAX_SEGMEXEC
68687 + if (page)
68688 + pax_mirror_anon_pte(vma, address, page, ptl);
68689 +#endif
68690 +
68691 unlock:
68692 pte_unmap_unlock(page_table, ptl);
68693 return 0;
68694 @@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68695 */
68696 /* Only go through if we didn't race with anybody else... */
68697 if (likely(pte_same(*page_table, orig_pte))) {
68698 +
68699 +#ifdef CONFIG_PAX_SEGMEXEC
68700 + if (anon && pax_find_mirror_vma(vma))
68701 + BUG_ON(!trylock_page(page));
68702 +#endif
68703 +
68704 flush_icache_page(vma, page);
68705 entry = mk_pte(page, vma->vm_page_prot);
68706 if (flags & FAULT_FLAG_WRITE)
68707 @@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68708
68709 /* no need to invalidate: a not-present page won't be cached */
68710 update_mmu_cache(vma, address, page_table);
68711 +
68712 +#ifdef CONFIG_PAX_SEGMEXEC
68713 + if (anon)
68714 + pax_mirror_anon_pte(vma, address, page, ptl);
68715 + else
68716 + pax_mirror_file_pte(vma, address, page, ptl);
68717 +#endif
68718 +
68719 } else {
68720 if (cow_page)
68721 mem_cgroup_uncharge_page(cow_page);
68722 @@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
68723 if (flags & FAULT_FLAG_WRITE)
68724 flush_tlb_fix_spurious_fault(vma, address);
68725 }
68726 +
68727 +#ifdef CONFIG_PAX_SEGMEXEC
68728 + pax_mirror_pte(vma, address, pte, pmd, ptl);
68729 + return 0;
68730 +#endif
68731 +
68732 unlock:
68733 pte_unmap_unlock(pte, ptl);
68734 return 0;
68735 @@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68736 pmd_t *pmd;
68737 pte_t *pte;
68738
68739 +#ifdef CONFIG_PAX_SEGMEXEC
68740 + struct vm_area_struct *vma_m;
68741 +#endif
68742 +
68743 __set_current_state(TASK_RUNNING);
68744
68745 count_vm_event(PGFAULT);
68746 @@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68747 if (unlikely(is_vm_hugetlb_page(vma)))
68748 return hugetlb_fault(mm, vma, address, flags);
68749
68750 +#ifdef CONFIG_PAX_SEGMEXEC
68751 + vma_m = pax_find_mirror_vma(vma);
68752 + if (vma_m) {
68753 + unsigned long address_m;
68754 + pgd_t *pgd_m;
68755 + pud_t *pud_m;
68756 + pmd_t *pmd_m;
68757 +
68758 + if (vma->vm_start > vma_m->vm_start) {
68759 + address_m = address;
68760 + address -= SEGMEXEC_TASK_SIZE;
68761 + vma = vma_m;
68762 + } else
68763 + address_m = address + SEGMEXEC_TASK_SIZE;
68764 +
68765 + pgd_m = pgd_offset(mm, address_m);
68766 + pud_m = pud_alloc(mm, pgd_m, address_m);
68767 + if (!pud_m)
68768 + return VM_FAULT_OOM;
68769 + pmd_m = pmd_alloc(mm, pud_m, address_m);
68770 + if (!pmd_m)
68771 + return VM_FAULT_OOM;
68772 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
68773 + return VM_FAULT_OOM;
68774 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68775 + }
68776 +#endif
68777 +
68778 pgd = pgd_offset(mm, address);
68779 pud = pud_alloc(mm, pgd, address);
68780 if (!pud)
68781 @@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68782 * run pte_offset_map on the pmd, if an huge pmd could
68783 * materialize from under us from a different thread.
68784 */
68785 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68786 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68787 return VM_FAULT_OOM;
68788 /* if an huge pmd materialized from under us just retry later */
68789 if (unlikely(pmd_trans_huge(*pmd)))
68790 @@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68791 gate_vma.vm_start = FIXADDR_USER_START;
68792 gate_vma.vm_end = FIXADDR_USER_END;
68793 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68794 - gate_vma.vm_page_prot = __P101;
68795 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68796 /*
68797 * Make sure the vDSO gets into every core dump.
68798 * Dumping its contents makes post-mortem fully interpretable later
68799 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
68800 index c3fdbcb..2e8ef90 100644
68801 --- a/mm/mempolicy.c
68802 +++ b/mm/mempolicy.c
68803 @@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68804 unsigned long vmstart;
68805 unsigned long vmend;
68806
68807 +#ifdef CONFIG_PAX_SEGMEXEC
68808 + struct vm_area_struct *vma_m;
68809 +#endif
68810 +
68811 vma = find_vma_prev(mm, start, &prev);
68812 if (!vma || vma->vm_start > start)
68813 return -EFAULT;
68814 @@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68815 err = policy_vma(vma, new_pol);
68816 if (err)
68817 goto out;
68818 +
68819 +#ifdef CONFIG_PAX_SEGMEXEC
68820 + vma_m = pax_find_mirror_vma(vma);
68821 + if (vma_m) {
68822 + err = policy_vma(vma_m, new_pol);
68823 + if (err)
68824 + goto out;
68825 + }
68826 +#endif
68827 +
68828 }
68829
68830 out:
68831 @@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
68832
68833 if (end < start)
68834 return -EINVAL;
68835 +
68836 +#ifdef CONFIG_PAX_SEGMEXEC
68837 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68838 + if (end > SEGMEXEC_TASK_SIZE)
68839 + return -EINVAL;
68840 + } else
68841 +#endif
68842 +
68843 + if (end > TASK_SIZE)
68844 + return -EINVAL;
68845 +
68846 if (end == start)
68847 return 0;
68848
68849 @@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68850 if (!mm)
68851 goto out;
68852
68853 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68854 + if (mm != current->mm &&
68855 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68856 + err = -EPERM;
68857 + goto out;
68858 + }
68859 +#endif
68860 +
68861 /*
68862 * Check if this process has the right to modify the specified
68863 * process. The right exists if the process has administrative
68864 @@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68865 rcu_read_lock();
68866 tcred = __task_cred(task);
68867 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68868 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68869 - !capable(CAP_SYS_NICE)) {
68870 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68871 rcu_read_unlock();
68872 err = -EPERM;
68873 goto out;
68874 diff --git a/mm/migrate.c b/mm/migrate.c
68875 index 177aca4..ab3a744 100644
68876 --- a/mm/migrate.c
68877 +++ b/mm/migrate.c
68878 @@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68879 if (!mm)
68880 return -EINVAL;
68881
68882 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68883 + if (mm != current->mm &&
68884 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68885 + err = -EPERM;
68886 + goto out;
68887 + }
68888 +#endif
68889 +
68890 /*
68891 * Check if this process has the right to modify the specified
68892 * process. The right exists if the process has administrative
68893 @@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68894 rcu_read_lock();
68895 tcred = __task_cred(task);
68896 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68897 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68898 - !capable(CAP_SYS_NICE)) {
68899 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68900 rcu_read_unlock();
68901 err = -EPERM;
68902 goto out;
68903 diff --git a/mm/mlock.c b/mm/mlock.c
68904 index 4f4f53b..9511904 100644
68905 --- a/mm/mlock.c
68906 +++ b/mm/mlock.c
68907 @@ -13,6 +13,7 @@
68908 #include <linux/pagemap.h>
68909 #include <linux/mempolicy.h>
68910 #include <linux/syscalls.h>
68911 +#include <linux/security.h>
68912 #include <linux/sched.h>
68913 #include <linux/export.h>
68914 #include <linux/rmap.h>
68915 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
68916 return -EINVAL;
68917 if (end == start)
68918 return 0;
68919 + if (end > TASK_SIZE)
68920 + return -EINVAL;
68921 +
68922 vma = find_vma_prev(current->mm, start, &prev);
68923 if (!vma || vma->vm_start > start)
68924 return -ENOMEM;
68925 @@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
68926 for (nstart = start ; ; ) {
68927 vm_flags_t newflags;
68928
68929 +#ifdef CONFIG_PAX_SEGMEXEC
68930 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68931 + break;
68932 +#endif
68933 +
68934 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68935
68936 newflags = vma->vm_flags | VM_LOCKED;
68937 @@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
68938 lock_limit >>= PAGE_SHIFT;
68939
68940 /* check against resource limits */
68941 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68942 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68943 error = do_mlock(start, len, 1);
68944 up_write(&current->mm->mmap_sem);
68945 @@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
68946 static int do_mlockall(int flags)
68947 {
68948 struct vm_area_struct * vma, * prev = NULL;
68949 - unsigned int def_flags = 0;
68950
68951 if (flags & MCL_FUTURE)
68952 - def_flags = VM_LOCKED;
68953 - current->mm->def_flags = def_flags;
68954 + current->mm->def_flags |= VM_LOCKED;
68955 + else
68956 + current->mm->def_flags &= ~VM_LOCKED;
68957 if (flags == MCL_FUTURE)
68958 goto out;
68959
68960 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68961 vm_flags_t newflags;
68962
68963 +#ifdef CONFIG_PAX_SEGMEXEC
68964 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68965 + break;
68966 +#endif
68967 +
68968 + BUG_ON(vma->vm_end > TASK_SIZE);
68969 newflags = vma->vm_flags | VM_LOCKED;
68970 if (!(flags & MCL_CURRENT))
68971 newflags &= ~VM_LOCKED;
68972 @@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68973 lock_limit >>= PAGE_SHIFT;
68974
68975 ret = -ENOMEM;
68976 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68977 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68978 capable(CAP_IPC_LOCK))
68979 ret = do_mlockall(flags);
68980 diff --git a/mm/mmap.c b/mm/mmap.c
68981 index eae90af..44552cf 100644
68982 --- a/mm/mmap.c
68983 +++ b/mm/mmap.c
68984 @@ -46,6 +46,16 @@
68985 #define arch_rebalance_pgtables(addr, len) (addr)
68986 #endif
68987
68988 +static inline void verify_mm_writelocked(struct mm_struct *mm)
68989 +{
68990 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68991 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68992 + up_read(&mm->mmap_sem);
68993 + BUG();
68994 + }
68995 +#endif
68996 +}
68997 +
68998 static void unmap_region(struct mm_struct *mm,
68999 struct vm_area_struct *vma, struct vm_area_struct *prev,
69000 unsigned long start, unsigned long end);
69001 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
69002 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
69003 *
69004 */
69005 -pgprot_t protection_map[16] = {
69006 +pgprot_t protection_map[16] __read_only = {
69007 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
69008 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
69009 };
69010
69011 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
69012 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
69013 {
69014 - return __pgprot(pgprot_val(protection_map[vm_flags &
69015 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
69016 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
69017 pgprot_val(arch_vm_get_page_prot(vm_flags)));
69018 +
69019 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69020 + if (!(__supported_pte_mask & _PAGE_NX) &&
69021 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
69022 + (vm_flags & (VM_READ | VM_WRITE)))
69023 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
69024 +#endif
69025 +
69026 + return prot;
69027 }
69028 EXPORT_SYMBOL(vm_get_page_prot);
69029
69030 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
69031 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
69032 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
69033 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
69034 /*
69035 * Make sure vm_committed_as in one cacheline and not cacheline shared with
69036 * other variables. It can be updated by several CPUs frequently.
69037 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
69038 struct vm_area_struct *next = vma->vm_next;
69039
69040 might_sleep();
69041 + BUG_ON(vma->vm_mirror);
69042 if (vma->vm_ops && vma->vm_ops->close)
69043 vma->vm_ops->close(vma);
69044 if (vma->vm_file) {
69045 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
69046 * not page aligned -Ram Gupta
69047 */
69048 rlim = rlimit(RLIMIT_DATA);
69049 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
69050 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
69051 (mm->end_data - mm->start_data) > rlim)
69052 goto out;
69053 @@ -689,6 +711,12 @@ static int
69054 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
69055 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
69056 {
69057 +
69058 +#ifdef CONFIG_PAX_SEGMEXEC
69059 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
69060 + return 0;
69061 +#endif
69062 +
69063 if (is_mergeable_vma(vma, file, vm_flags) &&
69064 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
69065 if (vma->vm_pgoff == vm_pgoff)
69066 @@ -708,6 +736,12 @@ static int
69067 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
69068 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
69069 {
69070 +
69071 +#ifdef CONFIG_PAX_SEGMEXEC
69072 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
69073 + return 0;
69074 +#endif
69075 +
69076 if (is_mergeable_vma(vma, file, vm_flags) &&
69077 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
69078 pgoff_t vm_pglen;
69079 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
69080 struct vm_area_struct *vma_merge(struct mm_struct *mm,
69081 struct vm_area_struct *prev, unsigned long addr,
69082 unsigned long end, unsigned long vm_flags,
69083 - struct anon_vma *anon_vma, struct file *file,
69084 + struct anon_vma *anon_vma, struct file *file,
69085 pgoff_t pgoff, struct mempolicy *policy)
69086 {
69087 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
69088 struct vm_area_struct *area, *next;
69089 int err;
69090
69091 +#ifdef CONFIG_PAX_SEGMEXEC
69092 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
69093 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
69094 +
69095 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
69096 +#endif
69097 +
69098 /*
69099 * We later require that vma->vm_flags == vm_flags,
69100 * so this tests vma->vm_flags & VM_SPECIAL, too.
69101 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
69102 if (next && next->vm_end == end) /* cases 6, 7, 8 */
69103 next = next->vm_next;
69104
69105 +#ifdef CONFIG_PAX_SEGMEXEC
69106 + if (prev)
69107 + prev_m = pax_find_mirror_vma(prev);
69108 + if (area)
69109 + area_m = pax_find_mirror_vma(area);
69110 + if (next)
69111 + next_m = pax_find_mirror_vma(next);
69112 +#endif
69113 +
69114 /*
69115 * Can it merge with the predecessor?
69116 */
69117 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
69118 /* cases 1, 6 */
69119 err = vma_adjust(prev, prev->vm_start,
69120 next->vm_end, prev->vm_pgoff, NULL);
69121 - } else /* cases 2, 5, 7 */
69122 +
69123 +#ifdef CONFIG_PAX_SEGMEXEC
69124 + if (!err && prev_m)
69125 + err = vma_adjust(prev_m, prev_m->vm_start,
69126 + next_m->vm_end, prev_m->vm_pgoff, NULL);
69127 +#endif
69128 +
69129 + } else { /* cases 2, 5, 7 */
69130 err = vma_adjust(prev, prev->vm_start,
69131 end, prev->vm_pgoff, NULL);
69132 +
69133 +#ifdef CONFIG_PAX_SEGMEXEC
69134 + if (!err && prev_m)
69135 + err = vma_adjust(prev_m, prev_m->vm_start,
69136 + end_m, prev_m->vm_pgoff, NULL);
69137 +#endif
69138 +
69139 + }
69140 if (err)
69141 return NULL;
69142 khugepaged_enter_vma_merge(prev);
69143 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
69144 mpol_equal(policy, vma_policy(next)) &&
69145 can_vma_merge_before(next, vm_flags,
69146 anon_vma, file, pgoff+pglen)) {
69147 - if (prev && addr < prev->vm_end) /* case 4 */
69148 + if (prev && addr < prev->vm_end) { /* case 4 */
69149 err = vma_adjust(prev, prev->vm_start,
69150 addr, prev->vm_pgoff, NULL);
69151 - else /* cases 3, 8 */
69152 +
69153 +#ifdef CONFIG_PAX_SEGMEXEC
69154 + if (!err && prev_m)
69155 + err = vma_adjust(prev_m, prev_m->vm_start,
69156 + addr_m, prev_m->vm_pgoff, NULL);
69157 +#endif
69158 +
69159 + } else { /* cases 3, 8 */
69160 err = vma_adjust(area, addr, next->vm_end,
69161 next->vm_pgoff - pglen, NULL);
69162 +
69163 +#ifdef CONFIG_PAX_SEGMEXEC
69164 + if (!err && area_m)
69165 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
69166 + next_m->vm_pgoff - pglen, NULL);
69167 +#endif
69168 +
69169 + }
69170 if (err)
69171 return NULL;
69172 khugepaged_enter_vma_merge(area);
69173 @@ -921,14 +1001,11 @@ none:
69174 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
69175 struct file *file, long pages)
69176 {
69177 - const unsigned long stack_flags
69178 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
69179 -
69180 if (file) {
69181 mm->shared_vm += pages;
69182 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
69183 mm->exec_vm += pages;
69184 - } else if (flags & stack_flags)
69185 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
69186 mm->stack_vm += pages;
69187 if (flags & (VM_RESERVED|VM_IO))
69188 mm->reserved_vm += pages;
69189 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69190 * (the exception is when the underlying filesystem is noexec
69191 * mounted, in which case we dont add PROT_EXEC.)
69192 */
69193 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69194 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69195 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
69196 prot |= PROT_EXEC;
69197
69198 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69199 /* Obtain the address to map to. we verify (or select) it and ensure
69200 * that it represents a valid section of the address space.
69201 */
69202 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
69203 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
69204 if (addr & ~PAGE_MASK)
69205 return addr;
69206
69207 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69208 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
69209 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
69210
69211 +#ifdef CONFIG_PAX_MPROTECT
69212 + if (mm->pax_flags & MF_PAX_MPROTECT) {
69213 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
69214 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
69215 + gr_log_rwxmmap(file);
69216 +
69217 +#ifdef CONFIG_PAX_EMUPLT
69218 + vm_flags &= ~VM_EXEC;
69219 +#else
69220 + return -EPERM;
69221 +#endif
69222 +
69223 + }
69224 +
69225 + if (!(vm_flags & VM_EXEC))
69226 + vm_flags &= ~VM_MAYEXEC;
69227 +#else
69228 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69229 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69230 +#endif
69231 + else
69232 + vm_flags &= ~VM_MAYWRITE;
69233 + }
69234 +#endif
69235 +
69236 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69237 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
69238 + vm_flags &= ~VM_PAGEEXEC;
69239 +#endif
69240 +
69241 if (flags & MAP_LOCKED)
69242 if (!can_do_mlock())
69243 return -EPERM;
69244 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69245 locked += mm->locked_vm;
69246 lock_limit = rlimit(RLIMIT_MEMLOCK);
69247 lock_limit >>= PAGE_SHIFT;
69248 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69249 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
69250 return -EAGAIN;
69251 }
69252 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69253 if (error)
69254 return error;
69255
69256 + if (!gr_acl_handle_mmap(file, prot))
69257 + return -EACCES;
69258 +
69259 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
69260 }
69261 EXPORT_SYMBOL(do_mmap_pgoff);
69262 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
69263 vm_flags_t vm_flags = vma->vm_flags;
69264
69265 /* If it was private or non-writable, the write bit is already clear */
69266 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
69267 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
69268 return 0;
69269
69270 /* The backer wishes to know when pages are first written to? */
69271 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
69272 unsigned long charged = 0;
69273 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
69274
69275 +#ifdef CONFIG_PAX_SEGMEXEC
69276 + struct vm_area_struct *vma_m = NULL;
69277 +#endif
69278 +
69279 + /*
69280 + * mm->mmap_sem is required to protect against another thread
69281 + * changing the mappings in case we sleep.
69282 + */
69283 + verify_mm_writelocked(mm);
69284 +
69285 /* Clear old maps */
69286 error = -ENOMEM;
69287 -munmap_back:
69288 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69289 if (vma && vma->vm_start < addr + len) {
69290 if (do_munmap(mm, addr, len))
69291 return -ENOMEM;
69292 - goto munmap_back;
69293 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69294 + BUG_ON(vma && vma->vm_start < addr + len);
69295 }
69296
69297 /* Check against address space limit. */
69298 @@ -1258,6 +1379,16 @@ munmap_back:
69299 goto unacct_error;
69300 }
69301
69302 +#ifdef CONFIG_PAX_SEGMEXEC
69303 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
69304 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69305 + if (!vma_m) {
69306 + error = -ENOMEM;
69307 + goto free_vma;
69308 + }
69309 + }
69310 +#endif
69311 +
69312 vma->vm_mm = mm;
69313 vma->vm_start = addr;
69314 vma->vm_end = addr + len;
69315 @@ -1281,6 +1412,19 @@ munmap_back:
69316 error = file->f_op->mmap(file, vma);
69317 if (error)
69318 goto unmap_and_free_vma;
69319 +
69320 +#ifdef CONFIG_PAX_SEGMEXEC
69321 + if (vma_m && (vm_flags & VM_EXECUTABLE))
69322 + added_exe_file_vma(mm);
69323 +#endif
69324 +
69325 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69326 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
69327 + vma->vm_flags |= VM_PAGEEXEC;
69328 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69329 + }
69330 +#endif
69331 +
69332 if (vm_flags & VM_EXECUTABLE)
69333 added_exe_file_vma(mm);
69334
69335 @@ -1316,6 +1460,11 @@ munmap_back:
69336 vma_link(mm, vma, prev, rb_link, rb_parent);
69337 file = vma->vm_file;
69338
69339 +#ifdef CONFIG_PAX_SEGMEXEC
69340 + if (vma_m)
69341 + BUG_ON(pax_mirror_vma(vma_m, vma));
69342 +#endif
69343 +
69344 /* Once vma denies write, undo our temporary denial count */
69345 if (correct_wcount)
69346 atomic_inc(&inode->i_writecount);
69347 @@ -1324,6 +1473,7 @@ out:
69348
69349 mm->total_vm += len >> PAGE_SHIFT;
69350 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
69351 + track_exec_limit(mm, addr, addr + len, vm_flags);
69352 if (vm_flags & VM_LOCKED) {
69353 if (!mlock_vma_pages_range(vma, addr, addr + len))
69354 mm->locked_vm += (len >> PAGE_SHIFT);
69355 @@ -1341,6 +1491,12 @@ unmap_and_free_vma:
69356 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
69357 charged = 0;
69358 free_vma:
69359 +
69360 +#ifdef CONFIG_PAX_SEGMEXEC
69361 + if (vma_m)
69362 + kmem_cache_free(vm_area_cachep, vma_m);
69363 +#endif
69364 +
69365 kmem_cache_free(vm_area_cachep, vma);
69366 unacct_error:
69367 if (charged)
69368 @@ -1348,6 +1504,44 @@ unacct_error:
69369 return error;
69370 }
69371
69372 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
69373 +{
69374 + if (!vma) {
69375 +#ifdef CONFIG_STACK_GROWSUP
69376 + if (addr > sysctl_heap_stack_gap)
69377 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
69378 + else
69379 + vma = find_vma(current->mm, 0);
69380 + if (vma && (vma->vm_flags & VM_GROWSUP))
69381 + return false;
69382 +#endif
69383 + return true;
69384 + }
69385 +
69386 + if (addr + len > vma->vm_start)
69387 + return false;
69388 +
69389 + if (vma->vm_flags & VM_GROWSDOWN)
69390 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
69391 +#ifdef CONFIG_STACK_GROWSUP
69392 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
69393 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
69394 +#endif
69395 +
69396 + return true;
69397 +}
69398 +
69399 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
69400 +{
69401 + if (vma->vm_start < len)
69402 + return -ENOMEM;
69403 + if (!(vma->vm_flags & VM_GROWSDOWN))
69404 + return vma->vm_start - len;
69405 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
69406 + return vma->vm_start - len - sysctl_heap_stack_gap;
69407 + return -ENOMEM;
69408 +}
69409 +
69410 /* Get an address range which is currently unmapped.
69411 * For shmat() with addr=0.
69412 *
69413 @@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
69414 if (flags & MAP_FIXED)
69415 return addr;
69416
69417 +#ifdef CONFIG_PAX_RANDMMAP
69418 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69419 +#endif
69420 +
69421 if (addr) {
69422 addr = PAGE_ALIGN(addr);
69423 - vma = find_vma(mm, addr);
69424 - if (TASK_SIZE - len >= addr &&
69425 - (!vma || addr + len <= vma->vm_start))
69426 - return addr;
69427 + if (TASK_SIZE - len >= addr) {
69428 + vma = find_vma(mm, addr);
69429 + if (check_heap_stack_gap(vma, addr, len))
69430 + return addr;
69431 + }
69432 }
69433 if (len > mm->cached_hole_size) {
69434 - start_addr = addr = mm->free_area_cache;
69435 + start_addr = addr = mm->free_area_cache;
69436 } else {
69437 - start_addr = addr = TASK_UNMAPPED_BASE;
69438 - mm->cached_hole_size = 0;
69439 + start_addr = addr = mm->mmap_base;
69440 + mm->cached_hole_size = 0;
69441 }
69442
69443 full_search:
69444 @@ -1396,34 +1595,40 @@ full_search:
69445 * Start a new search - just in case we missed
69446 * some holes.
69447 */
69448 - if (start_addr != TASK_UNMAPPED_BASE) {
69449 - addr = TASK_UNMAPPED_BASE;
69450 - start_addr = addr;
69451 + if (start_addr != mm->mmap_base) {
69452 + start_addr = addr = mm->mmap_base;
69453 mm->cached_hole_size = 0;
69454 goto full_search;
69455 }
69456 return -ENOMEM;
69457 }
69458 - if (!vma || addr + len <= vma->vm_start) {
69459 - /*
69460 - * Remember the place where we stopped the search:
69461 - */
69462 - mm->free_area_cache = addr + len;
69463 - return addr;
69464 - }
69465 + if (check_heap_stack_gap(vma, addr, len))
69466 + break;
69467 if (addr + mm->cached_hole_size < vma->vm_start)
69468 mm->cached_hole_size = vma->vm_start - addr;
69469 addr = vma->vm_end;
69470 }
69471 +
69472 + /*
69473 + * Remember the place where we stopped the search:
69474 + */
69475 + mm->free_area_cache = addr + len;
69476 + return addr;
69477 }
69478 #endif
69479
69480 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69481 {
69482 +
69483 +#ifdef CONFIG_PAX_SEGMEXEC
69484 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69485 + return;
69486 +#endif
69487 +
69488 /*
69489 * Is this a new hole at the lowest possible address?
69490 */
69491 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69492 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69493 mm->free_area_cache = addr;
69494 mm->cached_hole_size = ~0UL;
69495 }
69496 @@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69497 {
69498 struct vm_area_struct *vma;
69499 struct mm_struct *mm = current->mm;
69500 - unsigned long addr = addr0;
69501 + unsigned long base = mm->mmap_base, addr = addr0;
69502
69503 /* requested length too big for entire address space */
69504 if (len > TASK_SIZE)
69505 @@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69506 if (flags & MAP_FIXED)
69507 return addr;
69508
69509 +#ifdef CONFIG_PAX_RANDMMAP
69510 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69511 +#endif
69512 +
69513 /* requesting a specific address */
69514 if (addr) {
69515 addr = PAGE_ALIGN(addr);
69516 - vma = find_vma(mm, addr);
69517 - if (TASK_SIZE - len >= addr &&
69518 - (!vma || addr + len <= vma->vm_start))
69519 - return addr;
69520 + if (TASK_SIZE - len >= addr) {
69521 + vma = find_vma(mm, addr);
69522 + if (check_heap_stack_gap(vma, addr, len))
69523 + return addr;
69524 + }
69525 }
69526
69527 /* check if free_area_cache is useful for us */
69528 @@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69529 /* make sure it can fit in the remaining address space */
69530 if (addr > len) {
69531 vma = find_vma(mm, addr-len);
69532 - if (!vma || addr <= vma->vm_start)
69533 + if (check_heap_stack_gap(vma, addr - len, len))
69534 /* remember the address as a hint for next time */
69535 return (mm->free_area_cache = addr-len);
69536 }
69537 @@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69538 * return with success:
69539 */
69540 vma = find_vma(mm, addr);
69541 - if (!vma || addr+len <= vma->vm_start)
69542 + if (check_heap_stack_gap(vma, addr, len))
69543 /* remember the address as a hint for next time */
69544 return (mm->free_area_cache = addr);
69545
69546 @@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69547 mm->cached_hole_size = vma->vm_start - addr;
69548
69549 /* try just below the current vma->vm_start */
69550 - addr = vma->vm_start-len;
69551 - } while (len < vma->vm_start);
69552 + addr = skip_heap_stack_gap(vma, len);
69553 + } while (!IS_ERR_VALUE(addr));
69554
69555 bottomup:
69556 /*
69557 @@ -1507,13 +1717,21 @@ bottomup:
69558 * can happen with large stack limits and large mmap()
69559 * allocations.
69560 */
69561 + mm->mmap_base = TASK_UNMAPPED_BASE;
69562 +
69563 +#ifdef CONFIG_PAX_RANDMMAP
69564 + if (mm->pax_flags & MF_PAX_RANDMMAP)
69565 + mm->mmap_base += mm->delta_mmap;
69566 +#endif
69567 +
69568 + mm->free_area_cache = mm->mmap_base;
69569 mm->cached_hole_size = ~0UL;
69570 - mm->free_area_cache = TASK_UNMAPPED_BASE;
69571 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69572 /*
69573 * Restore the topdown base:
69574 */
69575 - mm->free_area_cache = mm->mmap_base;
69576 + mm->mmap_base = base;
69577 + mm->free_area_cache = base;
69578 mm->cached_hole_size = ~0UL;
69579
69580 return addr;
69581 @@ -1522,6 +1740,12 @@ bottomup:
69582
69583 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69584 {
69585 +
69586 +#ifdef CONFIG_PAX_SEGMEXEC
69587 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69588 + return;
69589 +#endif
69590 +
69591 /*
69592 * Is this a new hole at the highest possible address?
69593 */
69594 @@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69595 mm->free_area_cache = addr;
69596
69597 /* dont allow allocations above current base */
69598 - if (mm->free_area_cache > mm->mmap_base)
69599 + if (mm->free_area_cache > mm->mmap_base) {
69600 mm->free_area_cache = mm->mmap_base;
69601 + mm->cached_hole_size = ~0UL;
69602 + }
69603 }
69604
69605 unsigned long
69606 @@ -1603,40 +1829,42 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
69607
69608 EXPORT_SYMBOL(find_vma);
69609
69610 -/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
69611 +/*
69612 + * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
69613 + * Note: pprev is set to NULL when return value is NULL.
69614 + */
69615 struct vm_area_struct *
69616 find_vma_prev(struct mm_struct *mm, unsigned long addr,
69617 struct vm_area_struct **pprev)
69618 {
69619 - struct vm_area_struct *vma = NULL, *prev = NULL;
69620 - struct rb_node *rb_node;
69621 - if (!mm)
69622 - goto out;
69623 + struct vm_area_struct *vma;
69624
69625 - /* Guard against addr being lower than the first VMA */
69626 - vma = mm->mmap;
69627 + vma = find_vma(mm, addr);
69628 + *pprev = vma ? vma->vm_prev : NULL;
69629 + return vma;
69630 +}
69631
69632 - /* Go through the RB tree quickly. */
69633 - rb_node = mm->mm_rb.rb_node;
69634 +#ifdef CONFIG_PAX_SEGMEXEC
69635 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69636 +{
69637 + struct vm_area_struct *vma_m;
69638
69639 - while (rb_node) {
69640 - struct vm_area_struct *vma_tmp;
69641 - vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
69642 -
69643 - if (addr < vma_tmp->vm_end) {
69644 - rb_node = rb_node->rb_left;
69645 - } else {
69646 - prev = vma_tmp;
69647 - if (!prev->vm_next || (addr < prev->vm_next->vm_end))
69648 - break;
69649 - rb_node = rb_node->rb_right;
69650 - }
69651 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69652 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69653 + BUG_ON(vma->vm_mirror);
69654 + return NULL;
69655 }
69656 -
69657 -out:
69658 - *pprev = prev;
69659 - return prev ? prev->vm_next : vma;
69660 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69661 + vma_m = vma->vm_mirror;
69662 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69663 + BUG_ON(vma->vm_file != vma_m->vm_file);
69664 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69665 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69666 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69667 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69668 + return vma_m;
69669 }
69670 +#endif
69671
69672 /*
69673 * Verify that the stack growth is acceptable and
69674 @@ -1654,6 +1882,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69675 return -ENOMEM;
69676
69677 /* Stack limit test */
69678 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
69679 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69680 return -ENOMEM;
69681
69682 @@ -1664,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69683 locked = mm->locked_vm + grow;
69684 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69685 limit >>= PAGE_SHIFT;
69686 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69687 if (locked > limit && !capable(CAP_IPC_LOCK))
69688 return -ENOMEM;
69689 }
69690 @@ -1694,37 +1924,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69691 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69692 * vma is the last one with address > vma->vm_end. Have to extend vma.
69693 */
69694 +#ifndef CONFIG_IA64
69695 +static
69696 +#endif
69697 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69698 {
69699 int error;
69700 + bool locknext;
69701
69702 if (!(vma->vm_flags & VM_GROWSUP))
69703 return -EFAULT;
69704
69705 + /* Also guard against wrapping around to address 0. */
69706 + if (address < PAGE_ALIGN(address+1))
69707 + address = PAGE_ALIGN(address+1);
69708 + else
69709 + return -ENOMEM;
69710 +
69711 /*
69712 * We must make sure the anon_vma is allocated
69713 * so that the anon_vma locking is not a noop.
69714 */
69715 if (unlikely(anon_vma_prepare(vma)))
69716 return -ENOMEM;
69717 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69718 + if (locknext && anon_vma_prepare(vma->vm_next))
69719 + return -ENOMEM;
69720 vma_lock_anon_vma(vma);
69721 + if (locknext)
69722 + vma_lock_anon_vma(vma->vm_next);
69723
69724 /*
69725 * vma->vm_start/vm_end cannot change under us because the caller
69726 * is required to hold the mmap_sem in read mode. We need the
69727 - * anon_vma lock to serialize against concurrent expand_stacks.
69728 - * Also guard against wrapping around to address 0.
69729 + * anon_vma locks to serialize against concurrent expand_stacks
69730 + * and expand_upwards.
69731 */
69732 - if (address < PAGE_ALIGN(address+4))
69733 - address = PAGE_ALIGN(address+4);
69734 - else {
69735 - vma_unlock_anon_vma(vma);
69736 - return -ENOMEM;
69737 - }
69738 error = 0;
69739
69740 /* Somebody else might have raced and expanded it already */
69741 - if (address > vma->vm_end) {
69742 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69743 + error = -ENOMEM;
69744 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69745 unsigned long size, grow;
69746
69747 size = address - vma->vm_start;
69748 @@ -1739,6 +1980,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69749 }
69750 }
69751 }
69752 + if (locknext)
69753 + vma_unlock_anon_vma(vma->vm_next);
69754 vma_unlock_anon_vma(vma);
69755 khugepaged_enter_vma_merge(vma);
69756 return error;
69757 @@ -1752,6 +1995,8 @@ int expand_downwards(struct vm_area_struct *vma,
69758 unsigned long address)
69759 {
69760 int error;
69761 + bool lockprev = false;
69762 + struct vm_area_struct *prev;
69763
69764 /*
69765 * We must make sure the anon_vma is allocated
69766 @@ -1765,6 +2010,15 @@ int expand_downwards(struct vm_area_struct *vma,
69767 if (error)
69768 return error;
69769
69770 + prev = vma->vm_prev;
69771 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69772 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69773 +#endif
69774 + if (lockprev && anon_vma_prepare(prev))
69775 + return -ENOMEM;
69776 + if (lockprev)
69777 + vma_lock_anon_vma(prev);
69778 +
69779 vma_lock_anon_vma(vma);
69780
69781 /*
69782 @@ -1774,9 +2028,17 @@ int expand_downwards(struct vm_area_struct *vma,
69783 */
69784
69785 /* Somebody else might have raced and expanded it already */
69786 - if (address < vma->vm_start) {
69787 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69788 + error = -ENOMEM;
69789 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69790 unsigned long size, grow;
69791
69792 +#ifdef CONFIG_PAX_SEGMEXEC
69793 + struct vm_area_struct *vma_m;
69794 +
69795 + vma_m = pax_find_mirror_vma(vma);
69796 +#endif
69797 +
69798 size = vma->vm_end - address;
69799 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69800
69801 @@ -1786,11 +2048,22 @@ int expand_downwards(struct vm_area_struct *vma,
69802 if (!error) {
69803 vma->vm_start = address;
69804 vma->vm_pgoff -= grow;
69805 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69806 +
69807 +#ifdef CONFIG_PAX_SEGMEXEC
69808 + if (vma_m) {
69809 + vma_m->vm_start -= grow << PAGE_SHIFT;
69810 + vma_m->vm_pgoff -= grow;
69811 + }
69812 +#endif
69813 +
69814 perf_event_mmap(vma);
69815 }
69816 }
69817 }
69818 vma_unlock_anon_vma(vma);
69819 + if (lockprev)
69820 + vma_unlock_anon_vma(prev);
69821 khugepaged_enter_vma_merge(vma);
69822 return error;
69823 }
69824 @@ -1860,6 +2133,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
69825 do {
69826 long nrpages = vma_pages(vma);
69827
69828 +#ifdef CONFIG_PAX_SEGMEXEC
69829 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69830 + vma = remove_vma(vma);
69831 + continue;
69832 + }
69833 +#endif
69834 +
69835 mm->total_vm -= nrpages;
69836 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69837 vma = remove_vma(vma);
69838 @@ -1905,6 +2185,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
69839 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69840 vma->vm_prev = NULL;
69841 do {
69842 +
69843 +#ifdef CONFIG_PAX_SEGMEXEC
69844 + if (vma->vm_mirror) {
69845 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69846 + vma->vm_mirror->vm_mirror = NULL;
69847 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
69848 + vma->vm_mirror = NULL;
69849 + }
69850 +#endif
69851 +
69852 rb_erase(&vma->vm_rb, &mm->mm_rb);
69853 mm->map_count--;
69854 tail_vma = vma;
69855 @@ -1933,14 +2223,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69856 struct vm_area_struct *new;
69857 int err = -ENOMEM;
69858
69859 +#ifdef CONFIG_PAX_SEGMEXEC
69860 + struct vm_area_struct *vma_m, *new_m = NULL;
69861 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69862 +#endif
69863 +
69864 if (is_vm_hugetlb_page(vma) && (addr &
69865 ~(huge_page_mask(hstate_vma(vma)))))
69866 return -EINVAL;
69867
69868 +#ifdef CONFIG_PAX_SEGMEXEC
69869 + vma_m = pax_find_mirror_vma(vma);
69870 +#endif
69871 +
69872 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69873 if (!new)
69874 goto out_err;
69875
69876 +#ifdef CONFIG_PAX_SEGMEXEC
69877 + if (vma_m) {
69878 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69879 + if (!new_m) {
69880 + kmem_cache_free(vm_area_cachep, new);
69881 + goto out_err;
69882 + }
69883 + }
69884 +#endif
69885 +
69886 /* most fields are the same, copy all, and then fixup */
69887 *new = *vma;
69888
69889 @@ -1953,6 +2262,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69890 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69891 }
69892
69893 +#ifdef CONFIG_PAX_SEGMEXEC
69894 + if (vma_m) {
69895 + *new_m = *vma_m;
69896 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
69897 + new_m->vm_mirror = new;
69898 + new->vm_mirror = new_m;
69899 +
69900 + if (new_below)
69901 + new_m->vm_end = addr_m;
69902 + else {
69903 + new_m->vm_start = addr_m;
69904 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69905 + }
69906 + }
69907 +#endif
69908 +
69909 pol = mpol_dup(vma_policy(vma));
69910 if (IS_ERR(pol)) {
69911 err = PTR_ERR(pol);
69912 @@ -1978,6 +2303,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69913 else
69914 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69915
69916 +#ifdef CONFIG_PAX_SEGMEXEC
69917 + if (!err && vma_m) {
69918 + if (anon_vma_clone(new_m, vma_m))
69919 + goto out_free_mpol;
69920 +
69921 + mpol_get(pol);
69922 + vma_set_policy(new_m, pol);
69923 +
69924 + if (new_m->vm_file) {
69925 + get_file(new_m->vm_file);
69926 + if (vma_m->vm_flags & VM_EXECUTABLE)
69927 + added_exe_file_vma(mm);
69928 + }
69929 +
69930 + if (new_m->vm_ops && new_m->vm_ops->open)
69931 + new_m->vm_ops->open(new_m);
69932 +
69933 + if (new_below)
69934 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69935 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69936 + else
69937 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69938 +
69939 + if (err) {
69940 + if (new_m->vm_ops && new_m->vm_ops->close)
69941 + new_m->vm_ops->close(new_m);
69942 + if (new_m->vm_file) {
69943 + if (vma_m->vm_flags & VM_EXECUTABLE)
69944 + removed_exe_file_vma(mm);
69945 + fput(new_m->vm_file);
69946 + }
69947 + mpol_put(pol);
69948 + }
69949 + }
69950 +#endif
69951 +
69952 /* Success. */
69953 if (!err)
69954 return 0;
69955 @@ -1990,10 +2351,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69956 removed_exe_file_vma(mm);
69957 fput(new->vm_file);
69958 }
69959 - unlink_anon_vmas(new);
69960 out_free_mpol:
69961 mpol_put(pol);
69962 out_free_vma:
69963 +
69964 +#ifdef CONFIG_PAX_SEGMEXEC
69965 + if (new_m) {
69966 + unlink_anon_vmas(new_m);
69967 + kmem_cache_free(vm_area_cachep, new_m);
69968 + }
69969 +#endif
69970 +
69971 + unlink_anon_vmas(new);
69972 kmem_cache_free(vm_area_cachep, new);
69973 out_err:
69974 return err;
69975 @@ -2006,6 +2375,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69976 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69977 unsigned long addr, int new_below)
69978 {
69979 +
69980 +#ifdef CONFIG_PAX_SEGMEXEC
69981 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69982 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69983 + if (mm->map_count >= sysctl_max_map_count-1)
69984 + return -ENOMEM;
69985 + } else
69986 +#endif
69987 +
69988 if (mm->map_count >= sysctl_max_map_count)
69989 return -ENOMEM;
69990
69991 @@ -2017,11 +2395,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69992 * work. This now handles partial unmappings.
69993 * Jeremy Fitzhardinge <jeremy@goop.org>
69994 */
69995 +#ifdef CONFIG_PAX_SEGMEXEC
69996 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69997 {
69998 + int ret = __do_munmap(mm, start, len);
69999 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
70000 + return ret;
70001 +
70002 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
70003 +}
70004 +
70005 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
70006 +#else
70007 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
70008 +#endif
70009 +{
70010 unsigned long end;
70011 struct vm_area_struct *vma, *prev, *last;
70012
70013 + /*
70014 + * mm->mmap_sem is required to protect against another thread
70015 + * changing the mappings in case we sleep.
70016 + */
70017 + verify_mm_writelocked(mm);
70018 +
70019 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
70020 return -EINVAL;
70021
70022 @@ -2096,6 +2493,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
70023 /* Fix up all other VM information */
70024 remove_vma_list(mm, vma);
70025
70026 + track_exec_limit(mm, start, end, 0UL);
70027 +
70028 return 0;
70029 }
70030
70031 @@ -2108,22 +2507,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
70032
70033 profile_munmap(addr);
70034
70035 +#ifdef CONFIG_PAX_SEGMEXEC
70036 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
70037 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
70038 + return -EINVAL;
70039 +#endif
70040 +
70041 down_write(&mm->mmap_sem);
70042 ret = do_munmap(mm, addr, len);
70043 up_write(&mm->mmap_sem);
70044 return ret;
70045 }
70046
70047 -static inline void verify_mm_writelocked(struct mm_struct *mm)
70048 -{
70049 -#ifdef CONFIG_DEBUG_VM
70050 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70051 - WARN_ON(1);
70052 - up_read(&mm->mmap_sem);
70053 - }
70054 -#endif
70055 -}
70056 -
70057 /*
70058 * this is really a simplified "do_mmap". it only handles
70059 * anonymous maps. eventually we may be able to do some
70060 @@ -2137,6 +2532,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70061 struct rb_node ** rb_link, * rb_parent;
70062 pgoff_t pgoff = addr >> PAGE_SHIFT;
70063 int error;
70064 + unsigned long charged;
70065
70066 len = PAGE_ALIGN(len);
70067 if (!len)
70068 @@ -2148,16 +2544,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70069
70070 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
70071
70072 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
70073 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
70074 + flags &= ~VM_EXEC;
70075 +
70076 +#ifdef CONFIG_PAX_MPROTECT
70077 + if (mm->pax_flags & MF_PAX_MPROTECT)
70078 + flags &= ~VM_MAYEXEC;
70079 +#endif
70080 +
70081 + }
70082 +#endif
70083 +
70084 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
70085 if (error & ~PAGE_MASK)
70086 return error;
70087
70088 + charged = len >> PAGE_SHIFT;
70089 +
70090 /*
70091 * mlock MCL_FUTURE?
70092 */
70093 if (mm->def_flags & VM_LOCKED) {
70094 unsigned long locked, lock_limit;
70095 - locked = len >> PAGE_SHIFT;
70096 + locked = charged;
70097 locked += mm->locked_vm;
70098 lock_limit = rlimit(RLIMIT_MEMLOCK);
70099 lock_limit >>= PAGE_SHIFT;
70100 @@ -2174,22 +2584,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70101 /*
70102 * Clear old maps. this also does some error checking for us
70103 */
70104 - munmap_back:
70105 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70106 if (vma && vma->vm_start < addr + len) {
70107 if (do_munmap(mm, addr, len))
70108 return -ENOMEM;
70109 - goto munmap_back;
70110 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70111 + BUG_ON(vma && vma->vm_start < addr + len);
70112 }
70113
70114 /* Check against address space limits *after* clearing old maps... */
70115 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
70116 + if (!may_expand_vm(mm, charged))
70117 return -ENOMEM;
70118
70119 if (mm->map_count > sysctl_max_map_count)
70120 return -ENOMEM;
70121
70122 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
70123 + if (security_vm_enough_memory(charged))
70124 return -ENOMEM;
70125
70126 /* Can we just expand an old private anonymous mapping? */
70127 @@ -2203,7 +2613,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70128 */
70129 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70130 if (!vma) {
70131 - vm_unacct_memory(len >> PAGE_SHIFT);
70132 + vm_unacct_memory(charged);
70133 return -ENOMEM;
70134 }
70135
70136 @@ -2217,11 +2627,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70137 vma_link(mm, vma, prev, rb_link, rb_parent);
70138 out:
70139 perf_event_mmap(vma);
70140 - mm->total_vm += len >> PAGE_SHIFT;
70141 + mm->total_vm += charged;
70142 if (flags & VM_LOCKED) {
70143 if (!mlock_vma_pages_range(vma, addr, addr + len))
70144 - mm->locked_vm += (len >> PAGE_SHIFT);
70145 + mm->locked_vm += charged;
70146 }
70147 + track_exec_limit(mm, addr, addr + len, flags);
70148 return addr;
70149 }
70150
70151 @@ -2268,8 +2679,10 @@ void exit_mmap(struct mm_struct *mm)
70152 * Walk the list again, actually closing and freeing it,
70153 * with preemption enabled, without holding any MM locks.
70154 */
70155 - while (vma)
70156 + while (vma) {
70157 + vma->vm_mirror = NULL;
70158 vma = remove_vma(vma);
70159 + }
70160
70161 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
70162 }
70163 @@ -2283,6 +2696,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
70164 struct vm_area_struct * __vma, * prev;
70165 struct rb_node ** rb_link, * rb_parent;
70166
70167 +#ifdef CONFIG_PAX_SEGMEXEC
70168 + struct vm_area_struct *vma_m = NULL;
70169 +#endif
70170 +
70171 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
70172 + return -EPERM;
70173 +
70174 /*
70175 * The vm_pgoff of a purely anonymous vma should be irrelevant
70176 * until its first write fault, when page's anon_vma and index
70177 @@ -2305,7 +2725,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
70178 if ((vma->vm_flags & VM_ACCOUNT) &&
70179 security_vm_enough_memory_mm(mm, vma_pages(vma)))
70180 return -ENOMEM;
70181 +
70182 +#ifdef CONFIG_PAX_SEGMEXEC
70183 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
70184 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70185 + if (!vma_m)
70186 + return -ENOMEM;
70187 + }
70188 +#endif
70189 +
70190 vma_link(mm, vma, prev, rb_link, rb_parent);
70191 +
70192 +#ifdef CONFIG_PAX_SEGMEXEC
70193 + if (vma_m)
70194 + BUG_ON(pax_mirror_vma(vma_m, vma));
70195 +#endif
70196 +
70197 return 0;
70198 }
70199
70200 @@ -2323,6 +2758,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
70201 struct rb_node **rb_link, *rb_parent;
70202 struct mempolicy *pol;
70203
70204 + BUG_ON(vma->vm_mirror);
70205 +
70206 /*
70207 * If anonymous vma has not yet been faulted, update new pgoff
70208 * to match new location, to increase its chance of merging.
70209 @@ -2373,6 +2810,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
70210 return NULL;
70211 }
70212
70213 +#ifdef CONFIG_PAX_SEGMEXEC
70214 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
70215 +{
70216 + struct vm_area_struct *prev_m;
70217 + struct rb_node **rb_link_m, *rb_parent_m;
70218 + struct mempolicy *pol_m;
70219 +
70220 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
70221 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
70222 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
70223 + *vma_m = *vma;
70224 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
70225 + if (anon_vma_clone(vma_m, vma))
70226 + return -ENOMEM;
70227 + pol_m = vma_policy(vma_m);
70228 + mpol_get(pol_m);
70229 + vma_set_policy(vma_m, pol_m);
70230 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
70231 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
70232 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
70233 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
70234 + if (vma_m->vm_file)
70235 + get_file(vma_m->vm_file);
70236 + if (vma_m->vm_ops && vma_m->vm_ops->open)
70237 + vma_m->vm_ops->open(vma_m);
70238 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
70239 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
70240 + vma_m->vm_mirror = vma;
70241 + vma->vm_mirror = vma_m;
70242 + return 0;
70243 +}
70244 +#endif
70245 +
70246 /*
70247 * Return true if the calling process may expand its vm space by the passed
70248 * number of pages
70249 @@ -2383,7 +2853,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
70250 unsigned long lim;
70251
70252 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
70253 -
70254 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
70255 if (cur + npages > lim)
70256 return 0;
70257 return 1;
70258 @@ -2454,6 +2924,22 @@ int install_special_mapping(struct mm_struct *mm,
70259 vma->vm_start = addr;
70260 vma->vm_end = addr + len;
70261
70262 +#ifdef CONFIG_PAX_MPROTECT
70263 + if (mm->pax_flags & MF_PAX_MPROTECT) {
70264 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
70265 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
70266 + return -EPERM;
70267 + if (!(vm_flags & VM_EXEC))
70268 + vm_flags &= ~VM_MAYEXEC;
70269 +#else
70270 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70271 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70272 +#endif
70273 + else
70274 + vm_flags &= ~VM_MAYWRITE;
70275 + }
70276 +#endif
70277 +
70278 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
70279 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
70280
70281 diff --git a/mm/mprotect.c b/mm/mprotect.c
70282 index 5a688a2..27e031c 100644
70283 --- a/mm/mprotect.c
70284 +++ b/mm/mprotect.c
70285 @@ -23,10 +23,16 @@
70286 #include <linux/mmu_notifier.h>
70287 #include <linux/migrate.h>
70288 #include <linux/perf_event.h>
70289 +
70290 +#ifdef CONFIG_PAX_MPROTECT
70291 +#include <linux/elf.h>
70292 +#endif
70293 +
70294 #include <asm/uaccess.h>
70295 #include <asm/pgtable.h>
70296 #include <asm/cacheflush.h>
70297 #include <asm/tlbflush.h>
70298 +#include <asm/mmu_context.h>
70299
70300 #ifndef pgprot_modify
70301 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
70302 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
70303 flush_tlb_range(vma, start, end);
70304 }
70305
70306 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70307 +/* called while holding the mmap semaphor for writing except stack expansion */
70308 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
70309 +{
70310 + unsigned long oldlimit, newlimit = 0UL;
70311 +
70312 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
70313 + return;
70314 +
70315 + spin_lock(&mm->page_table_lock);
70316 + oldlimit = mm->context.user_cs_limit;
70317 + if ((prot & VM_EXEC) && oldlimit < end)
70318 + /* USER_CS limit moved up */
70319 + newlimit = end;
70320 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
70321 + /* USER_CS limit moved down */
70322 + newlimit = start;
70323 +
70324 + if (newlimit) {
70325 + mm->context.user_cs_limit = newlimit;
70326 +
70327 +#ifdef CONFIG_SMP
70328 + wmb();
70329 + cpus_clear(mm->context.cpu_user_cs_mask);
70330 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
70331 +#endif
70332 +
70333 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
70334 + }
70335 + spin_unlock(&mm->page_table_lock);
70336 + if (newlimit == end) {
70337 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
70338 +
70339 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
70340 + if (is_vm_hugetlb_page(vma))
70341 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
70342 + else
70343 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
70344 + }
70345 +}
70346 +#endif
70347 +
70348 int
70349 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70350 unsigned long start, unsigned long end, unsigned long newflags)
70351 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70352 int error;
70353 int dirty_accountable = 0;
70354
70355 +#ifdef CONFIG_PAX_SEGMEXEC
70356 + struct vm_area_struct *vma_m = NULL;
70357 + unsigned long start_m, end_m;
70358 +
70359 + start_m = start + SEGMEXEC_TASK_SIZE;
70360 + end_m = end + SEGMEXEC_TASK_SIZE;
70361 +#endif
70362 +
70363 if (newflags == oldflags) {
70364 *pprev = vma;
70365 return 0;
70366 }
70367
70368 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
70369 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
70370 +
70371 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
70372 + return -ENOMEM;
70373 +
70374 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
70375 + return -ENOMEM;
70376 + }
70377 +
70378 /*
70379 * If we make a private mapping writable we increase our commit;
70380 * but (without finer accounting) cannot reduce our commit if we
70381 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70382 }
70383 }
70384
70385 +#ifdef CONFIG_PAX_SEGMEXEC
70386 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
70387 + if (start != vma->vm_start) {
70388 + error = split_vma(mm, vma, start, 1);
70389 + if (error)
70390 + goto fail;
70391 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
70392 + *pprev = (*pprev)->vm_next;
70393 + }
70394 +
70395 + if (end != vma->vm_end) {
70396 + error = split_vma(mm, vma, end, 0);
70397 + if (error)
70398 + goto fail;
70399 + }
70400 +
70401 + if (pax_find_mirror_vma(vma)) {
70402 + error = __do_munmap(mm, start_m, end_m - start_m);
70403 + if (error)
70404 + goto fail;
70405 + } else {
70406 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70407 + if (!vma_m) {
70408 + error = -ENOMEM;
70409 + goto fail;
70410 + }
70411 + vma->vm_flags = newflags;
70412 + error = pax_mirror_vma(vma_m, vma);
70413 + if (error) {
70414 + vma->vm_flags = oldflags;
70415 + goto fail;
70416 + }
70417 + }
70418 + }
70419 +#endif
70420 +
70421 /*
70422 * First try to merge with previous and/or next vma.
70423 */
70424 @@ -204,9 +306,21 @@ success:
70425 * vm_flags and vm_page_prot are protected by the mmap_sem
70426 * held in write mode.
70427 */
70428 +
70429 +#ifdef CONFIG_PAX_SEGMEXEC
70430 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
70431 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
70432 +#endif
70433 +
70434 vma->vm_flags = newflags;
70435 +
70436 +#ifdef CONFIG_PAX_MPROTECT
70437 + if (mm->binfmt && mm->binfmt->handle_mprotect)
70438 + mm->binfmt->handle_mprotect(vma, newflags);
70439 +#endif
70440 +
70441 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
70442 - vm_get_page_prot(newflags));
70443 + vm_get_page_prot(vma->vm_flags));
70444
70445 if (vma_wants_writenotify(vma)) {
70446 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
70447 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70448 end = start + len;
70449 if (end <= start)
70450 return -ENOMEM;
70451 +
70452 +#ifdef CONFIG_PAX_SEGMEXEC
70453 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70454 + if (end > SEGMEXEC_TASK_SIZE)
70455 + return -EINVAL;
70456 + } else
70457 +#endif
70458 +
70459 + if (end > TASK_SIZE)
70460 + return -EINVAL;
70461 +
70462 if (!arch_validate_prot(prot))
70463 return -EINVAL;
70464
70465 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70466 /*
70467 * Does the application expect PROT_READ to imply PROT_EXEC:
70468 */
70469 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70470 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70471 prot |= PROT_EXEC;
70472
70473 vm_flags = calc_vm_prot_bits(prot);
70474 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70475 if (start > vma->vm_start)
70476 prev = vma;
70477
70478 +#ifdef CONFIG_PAX_MPROTECT
70479 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
70480 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
70481 +#endif
70482 +
70483 for (nstart = start ; ; ) {
70484 unsigned long newflags;
70485
70486 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70487
70488 /* newflags >> 4 shift VM_MAY% in place of VM_% */
70489 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
70490 + if (prot & (PROT_WRITE | PROT_EXEC))
70491 + gr_log_rwxmprotect(vma->vm_file);
70492 +
70493 + error = -EACCES;
70494 + goto out;
70495 + }
70496 +
70497 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
70498 error = -EACCES;
70499 goto out;
70500 }
70501 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70502 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70503 if (error)
70504 goto out;
70505 +
70506 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
70507 +
70508 nstart = tmp;
70509
70510 if (nstart < prev->vm_end)
70511 diff --git a/mm/mremap.c b/mm/mremap.c
70512 index d6959cb..18a402a 100644
70513 --- a/mm/mremap.c
70514 +++ b/mm/mremap.c
70515 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
70516 continue;
70517 pte = ptep_get_and_clear(mm, old_addr, old_pte);
70518 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70519 +
70520 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70521 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70522 + pte = pte_exprotect(pte);
70523 +#endif
70524 +
70525 set_pte_at(mm, new_addr, new_pte, pte);
70526 }
70527
70528 @@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
70529 if (is_vm_hugetlb_page(vma))
70530 goto Einval;
70531
70532 +#ifdef CONFIG_PAX_SEGMEXEC
70533 + if (pax_find_mirror_vma(vma))
70534 + goto Einval;
70535 +#endif
70536 +
70537 /* We can't remap across vm area boundaries */
70538 if (old_len > vma->vm_end - addr)
70539 goto Efault;
70540 @@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr,
70541 unsigned long ret = -EINVAL;
70542 unsigned long charged = 0;
70543 unsigned long map_flags;
70544 + unsigned long pax_task_size = TASK_SIZE;
70545
70546 if (new_addr & ~PAGE_MASK)
70547 goto out;
70548
70549 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70550 +#ifdef CONFIG_PAX_SEGMEXEC
70551 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70552 + pax_task_size = SEGMEXEC_TASK_SIZE;
70553 +#endif
70554 +
70555 + pax_task_size -= PAGE_SIZE;
70556 +
70557 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70558 goto out;
70559
70560 /* Check if the location we're moving into overlaps the
70561 * old location at all, and fail if it does.
70562 */
70563 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
70564 - goto out;
70565 -
70566 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
70567 + if (addr + old_len > new_addr && new_addr + new_len > addr)
70568 goto out;
70569
70570 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70571 @@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr,
70572 struct vm_area_struct *vma;
70573 unsigned long ret = -EINVAL;
70574 unsigned long charged = 0;
70575 + unsigned long pax_task_size = TASK_SIZE;
70576
70577 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70578 goto out;
70579 @@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr,
70580 if (!new_len)
70581 goto out;
70582
70583 +#ifdef CONFIG_PAX_SEGMEXEC
70584 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70585 + pax_task_size = SEGMEXEC_TASK_SIZE;
70586 +#endif
70587 +
70588 + pax_task_size -= PAGE_SIZE;
70589 +
70590 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70591 + old_len > pax_task_size || addr > pax_task_size-old_len)
70592 + goto out;
70593 +
70594 if (flags & MREMAP_FIXED) {
70595 if (flags & MREMAP_MAYMOVE)
70596 ret = mremap_to(addr, old_len, new_addr, new_len);
70597 @@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr,
70598 addr + new_len);
70599 }
70600 ret = addr;
70601 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70602 goto out;
70603 }
70604 }
70605 @@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr,
70606 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70607 if (ret)
70608 goto out;
70609 +
70610 + map_flags = vma->vm_flags;
70611 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70612 + if (!(ret & ~PAGE_MASK)) {
70613 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70614 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70615 + }
70616 }
70617 out:
70618 if (ret & ~PAGE_MASK)
70619 diff --git a/mm/nobootmem.c b/mm/nobootmem.c
70620 index 7fa41b4..6087460 100644
70621 --- a/mm/nobootmem.c
70622 +++ b/mm/nobootmem.c
70623 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
70624 unsigned long __init free_all_memory_core_early(int nodeid)
70625 {
70626 int i;
70627 - u64 start, end;
70628 + u64 start, end, startrange, endrange;
70629 unsigned long count = 0;
70630 - struct range *range = NULL;
70631 + struct range *range = NULL, rangerange = { 0, 0 };
70632 int nr_range;
70633
70634 nr_range = get_free_all_memory_range(&range, nodeid);
70635 + startrange = __pa(range) >> PAGE_SHIFT;
70636 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
70637
70638 for (i = 0; i < nr_range; i++) {
70639 start = range[i].start;
70640 end = range[i].end;
70641 + if (start <= endrange && startrange < end) {
70642 + BUG_ON(rangerange.start | rangerange.end);
70643 + rangerange = range[i];
70644 + continue;
70645 + }
70646 count += end - start;
70647 __free_pages_memory(start, end);
70648 }
70649 + start = rangerange.start;
70650 + end = rangerange.end;
70651 + count += end - start;
70652 + __free_pages_memory(start, end);
70653
70654 return count;
70655 }
70656 diff --git a/mm/nommu.c b/mm/nommu.c
70657 index ee7e57e..cae4e40 100644
70658 --- a/mm/nommu.c
70659 +++ b/mm/nommu.c
70660 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
70661 int sysctl_overcommit_ratio = 50; /* default is 50% */
70662 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70663 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70664 -int heap_stack_gap = 0;
70665
70666 atomic_long_t mmap_pages_allocated;
70667
70668 @@ -829,15 +828,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
70669 EXPORT_SYMBOL(find_vma);
70670
70671 /*
70672 - * find a VMA
70673 - * - we don't extend stack VMAs under NOMMU conditions
70674 - */
70675 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70676 -{
70677 - return find_vma(mm, addr);
70678 -}
70679 -
70680 -/*
70681 * expand a stack to a given address
70682 * - not supported under NOMMU conditions
70683 */
70684 @@ -1557,6 +1547,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
70685
70686 /* most fields are the same, copy all, and then fixup */
70687 *new = *vma;
70688 + INIT_LIST_HEAD(&new->anon_vma_chain);
70689 *region = *vma->vm_region;
70690 new->vm_region = region;
70691
70692 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
70693 index 485be89..c059ad3 100644
70694 --- a/mm/page_alloc.c
70695 +++ b/mm/page_alloc.c
70696 @@ -341,7 +341,7 @@ out:
70697 * This usage means that zero-order pages may not be compound.
70698 */
70699
70700 -static void free_compound_page(struct page *page)
70701 +void free_compound_page(struct page *page)
70702 {
70703 __free_pages_ok(page, compound_order(page));
70704 }
70705 @@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70706 int i;
70707 int bad = 0;
70708
70709 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70710 + unsigned long index = 1UL << order;
70711 +#endif
70712 +
70713 trace_mm_page_free_direct(page, order);
70714 kmemcheck_free_shadow(page, order);
70715
70716 @@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70717 debug_check_no_obj_freed(page_address(page),
70718 PAGE_SIZE << order);
70719 }
70720 +
70721 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70722 + for (; index; --index)
70723 + sanitize_highpage(page + index - 1);
70724 +#endif
70725 +
70726 arch_free_page(page, order);
70727 kernel_map_pages(page, 1 << order, 0);
70728
70729 @@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
70730 arch_alloc_page(page, order);
70731 kernel_map_pages(page, 1 << order, 1);
70732
70733 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
70734 if (gfp_flags & __GFP_ZERO)
70735 prep_zero_page(page, order, gfp_flags);
70736 +#endif
70737
70738 if (order && (gfp_flags & __GFP_COMP))
70739 prep_compound_page(page, order);
70740 @@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
70741 unsigned long pfn;
70742
70743 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70744 +#ifdef CONFIG_X86_32
70745 + /* boot failures in VMware 8 on 32bit vanilla since
70746 + this change */
70747 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70748 +#else
70749 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70750 +#endif
70751 return 1;
70752 }
70753 return 0;
70754 diff --git a/mm/percpu.c b/mm/percpu.c
70755 index 716eb4a..8d10419 100644
70756 --- a/mm/percpu.c
70757 +++ b/mm/percpu.c
70758 @@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
70759 static unsigned int pcpu_high_unit_cpu __read_mostly;
70760
70761 /* the address of the first chunk which starts with the kernel static area */
70762 -void *pcpu_base_addr __read_mostly;
70763 +void *pcpu_base_addr __read_only;
70764 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70765
70766 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70767 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
70768 index e920aa3..137702a 100644
70769 --- a/mm/process_vm_access.c
70770 +++ b/mm/process_vm_access.c
70771 @@ -13,6 +13,7 @@
70772 #include <linux/uio.h>
70773 #include <linux/sched.h>
70774 #include <linux/highmem.h>
70775 +#include <linux/security.h>
70776 #include <linux/ptrace.h>
70777 #include <linux/slab.h>
70778 #include <linux/syscalls.h>
70779 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70780 size_t iov_l_curr_offset = 0;
70781 ssize_t iov_len;
70782
70783 + return -ENOSYS; // PaX: until properly audited
70784 +
70785 /*
70786 * Work out how many pages of struct pages we're going to need
70787 * when eventually calling get_user_pages
70788 */
70789 for (i = 0; i < riovcnt; i++) {
70790 iov_len = rvec[i].iov_len;
70791 - if (iov_len > 0) {
70792 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
70793 - + iov_len)
70794 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
70795 - / PAGE_SIZE + 1;
70796 - nr_pages = max(nr_pages, nr_pages_iov);
70797 - }
70798 + if (iov_len <= 0)
70799 + continue;
70800 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
70801 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
70802 + nr_pages = max(nr_pages, nr_pages_iov);
70803 }
70804
70805 if (nr_pages == 0)
70806 @@ -298,23 +299,23 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70807 goto free_proc_pages;
70808 }
70809
70810 - task_lock(task);
70811 - if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
70812 - task_unlock(task);
70813 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
70814 rc = -EPERM;
70815 goto put_task_struct;
70816 }
70817 - mm = task->mm;
70818
70819 - if (!mm || (task->flags & PF_KTHREAD)) {
70820 - task_unlock(task);
70821 - rc = -EINVAL;
70822 + mm = mm_access(task, PTRACE_MODE_ATTACH);
70823 + if (!mm || IS_ERR(mm)) {
70824 + rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
70825 + /*
70826 + * Explicitly map EACCES to EPERM as EPERM is a more a
70827 + * appropriate error code for process_vw_readv/writev
70828 + */
70829 + if (rc == -EACCES)
70830 + rc = -EPERM;
70831 goto put_task_struct;
70832 }
70833
70834 - atomic_inc(&mm->mm_users);
70835 - task_unlock(task);
70836 -
70837 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
70838 rc = process_vm_rw_single_vec(
70839 (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
70840 diff --git a/mm/rmap.c b/mm/rmap.c
70841 index a4fd368..e0ffec7 100644
70842 --- a/mm/rmap.c
70843 +++ b/mm/rmap.c
70844 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70845 struct anon_vma *anon_vma = vma->anon_vma;
70846 struct anon_vma_chain *avc;
70847
70848 +#ifdef CONFIG_PAX_SEGMEXEC
70849 + struct anon_vma_chain *avc_m = NULL;
70850 +#endif
70851 +
70852 might_sleep();
70853 if (unlikely(!anon_vma)) {
70854 struct mm_struct *mm = vma->vm_mm;
70855 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70856 if (!avc)
70857 goto out_enomem;
70858
70859 +#ifdef CONFIG_PAX_SEGMEXEC
70860 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70861 + if (!avc_m)
70862 + goto out_enomem_free_avc;
70863 +#endif
70864 +
70865 anon_vma = find_mergeable_anon_vma(vma);
70866 allocated = NULL;
70867 if (!anon_vma) {
70868 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70869 /* page_table_lock to protect against threads */
70870 spin_lock(&mm->page_table_lock);
70871 if (likely(!vma->anon_vma)) {
70872 +
70873 +#ifdef CONFIG_PAX_SEGMEXEC
70874 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70875 +
70876 + if (vma_m) {
70877 + BUG_ON(vma_m->anon_vma);
70878 + vma_m->anon_vma = anon_vma;
70879 + avc_m->anon_vma = anon_vma;
70880 + avc_m->vma = vma;
70881 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70882 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
70883 + avc_m = NULL;
70884 + }
70885 +#endif
70886 +
70887 vma->anon_vma = anon_vma;
70888 avc->anon_vma = anon_vma;
70889 avc->vma = vma;
70890 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70891
70892 if (unlikely(allocated))
70893 put_anon_vma(allocated);
70894 +
70895 +#ifdef CONFIG_PAX_SEGMEXEC
70896 + if (unlikely(avc_m))
70897 + anon_vma_chain_free(avc_m);
70898 +#endif
70899 +
70900 if (unlikely(avc))
70901 anon_vma_chain_free(avc);
70902 }
70903 return 0;
70904
70905 out_enomem_free_avc:
70906 +
70907 +#ifdef CONFIG_PAX_SEGMEXEC
70908 + if (avc_m)
70909 + anon_vma_chain_free(avc_m);
70910 +#endif
70911 +
70912 anon_vma_chain_free(avc);
70913 out_enomem:
70914 return -ENOMEM;
70915 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
70916 * Attach the anon_vmas from src to dst.
70917 * Returns 0 on success, -ENOMEM on failure.
70918 */
70919 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70920 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70921 {
70922 struct anon_vma_chain *avc, *pavc;
70923 struct anon_vma *root = NULL;
70924 @@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70925 * the corresponding VMA in the parent process is attached to.
70926 * Returns 0 on success, non-zero on failure.
70927 */
70928 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70929 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70930 {
70931 struct anon_vma_chain *avc;
70932 struct anon_vma *anon_vma;
70933 diff --git a/mm/shmem.c b/mm/shmem.c
70934 index 6c253f7..367e20a 100644
70935 --- a/mm/shmem.c
70936 +++ b/mm/shmem.c
70937 @@ -31,7 +31,7 @@
70938 #include <linux/export.h>
70939 #include <linux/swap.h>
70940
70941 -static struct vfsmount *shm_mnt;
70942 +struct vfsmount *shm_mnt;
70943
70944 #ifdef CONFIG_SHMEM
70945 /*
70946 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70947 #define BOGO_DIRENT_SIZE 20
70948
70949 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70950 -#define SHORT_SYMLINK_LEN 128
70951 +#define SHORT_SYMLINK_LEN 64
70952
70953 struct shmem_xattr {
70954 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70955 @@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
70956 int err = -ENOMEM;
70957
70958 /* Round up to L1_CACHE_BYTES to resist false sharing */
70959 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70960 - L1_CACHE_BYTES), GFP_KERNEL);
70961 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70962 if (!sbinfo)
70963 return -ENOMEM;
70964
70965 diff --git a/mm/slab.c b/mm/slab.c
70966 index 83311c9a..fcf8f86 100644
70967 --- a/mm/slab.c
70968 +++ b/mm/slab.c
70969 @@ -151,7 +151,7 @@
70970
70971 /* Legal flag mask for kmem_cache_create(). */
70972 #if DEBUG
70973 -# define CREATE_MASK (SLAB_RED_ZONE | \
70974 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70975 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70976 SLAB_CACHE_DMA | \
70977 SLAB_STORE_USER | \
70978 @@ -159,7 +159,7 @@
70979 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70980 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70981 #else
70982 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70983 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70984 SLAB_CACHE_DMA | \
70985 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70986 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70987 @@ -288,7 +288,7 @@ struct kmem_list3 {
70988 * Need this for bootstrapping a per node allocator.
70989 */
70990 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70991 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70992 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70993 #define CACHE_CACHE 0
70994 #define SIZE_AC MAX_NUMNODES
70995 #define SIZE_L3 (2 * MAX_NUMNODES)
70996 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
70997 if ((x)->max_freeable < i) \
70998 (x)->max_freeable = i; \
70999 } while (0)
71000 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
71001 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
71002 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
71003 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
71004 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
71005 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
71006 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
71007 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
71008 #else
71009 #define STATS_INC_ACTIVE(x) do { } while (0)
71010 #define STATS_DEC_ACTIVE(x) do { } while (0)
71011 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
71012 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
71013 */
71014 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
71015 - const struct slab *slab, void *obj)
71016 + const struct slab *slab, const void *obj)
71017 {
71018 u32 offset = (obj - slab->s_mem);
71019 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
71020 @@ -564,7 +564,7 @@ struct cache_names {
71021 static struct cache_names __initdata cache_names[] = {
71022 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
71023 #include <linux/kmalloc_sizes.h>
71024 - {NULL,}
71025 + {NULL}
71026 #undef CACHE
71027 };
71028
71029 @@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
71030 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
71031 sizes[INDEX_AC].cs_size,
71032 ARCH_KMALLOC_MINALIGN,
71033 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
71034 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
71035 NULL);
71036
71037 if (INDEX_AC != INDEX_L3) {
71038 @@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
71039 kmem_cache_create(names[INDEX_L3].name,
71040 sizes[INDEX_L3].cs_size,
71041 ARCH_KMALLOC_MINALIGN,
71042 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
71043 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
71044 NULL);
71045 }
71046
71047 @@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
71048 sizes->cs_cachep = kmem_cache_create(names->name,
71049 sizes->cs_size,
71050 ARCH_KMALLOC_MINALIGN,
71051 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
71052 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
71053 NULL);
71054 }
71055 #ifdef CONFIG_ZONE_DMA
71056 @@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
71057 }
71058 /* cpu stats */
71059 {
71060 - unsigned long allochit = atomic_read(&cachep->allochit);
71061 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
71062 - unsigned long freehit = atomic_read(&cachep->freehit);
71063 - unsigned long freemiss = atomic_read(&cachep->freemiss);
71064 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
71065 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
71066 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
71067 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
71068
71069 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
71070 allochit, allocmiss, freehit, freemiss);
71071 @@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
71072 {
71073 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
71074 #ifdef CONFIG_DEBUG_SLAB_LEAK
71075 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
71076 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
71077 #endif
71078 return 0;
71079 }
71080 module_init(slab_proc_init);
71081 #endif
71082
71083 +void check_object_size(const void *ptr, unsigned long n, bool to)
71084 +{
71085 +
71086 +#ifdef CONFIG_PAX_USERCOPY
71087 + struct page *page;
71088 + struct kmem_cache *cachep = NULL;
71089 + struct slab *slabp;
71090 + unsigned int objnr;
71091 + unsigned long offset;
71092 + const char *type;
71093 +
71094 + if (!n)
71095 + return;
71096 +
71097 + type = "<null>";
71098 + if (ZERO_OR_NULL_PTR(ptr))
71099 + goto report;
71100 +
71101 + if (!virt_addr_valid(ptr))
71102 + return;
71103 +
71104 + page = virt_to_head_page(ptr);
71105 +
71106 + type = "<process stack>";
71107 + if (!PageSlab(page)) {
71108 + if (object_is_on_stack(ptr, n) == -1)
71109 + goto report;
71110 + return;
71111 + }
71112 +
71113 + cachep = page_get_cache(page);
71114 + type = cachep->name;
71115 + if (!(cachep->flags & SLAB_USERCOPY))
71116 + goto report;
71117 +
71118 + slabp = page_get_slab(page);
71119 + objnr = obj_to_index(cachep, slabp, ptr);
71120 + BUG_ON(objnr >= cachep->num);
71121 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
71122 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
71123 + return;
71124 +
71125 +report:
71126 + pax_report_usercopy(ptr, n, to, type);
71127 +#endif
71128 +
71129 +}
71130 +EXPORT_SYMBOL(check_object_size);
71131 +
71132 /**
71133 * ksize - get the actual amount of memory allocated for a given object
71134 * @objp: Pointer to the object
71135 diff --git a/mm/slob.c b/mm/slob.c
71136 index 8105be4..e045f96 100644
71137 --- a/mm/slob.c
71138 +++ b/mm/slob.c
71139 @@ -29,7 +29,7 @@
71140 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
71141 * alloc_pages() directly, allocating compound pages so the page order
71142 * does not have to be separately tracked, and also stores the exact
71143 - * allocation size in page->private so that it can be used to accurately
71144 + * allocation size in slob_page->size so that it can be used to accurately
71145 * provide ksize(). These objects are detected in kfree() because slob_page()
71146 * is false for them.
71147 *
71148 @@ -58,6 +58,7 @@
71149 */
71150
71151 #include <linux/kernel.h>
71152 +#include <linux/sched.h>
71153 #include <linux/slab.h>
71154 #include <linux/mm.h>
71155 #include <linux/swap.h> /* struct reclaim_state */
71156 @@ -102,7 +103,8 @@ struct slob_page {
71157 unsigned long flags; /* mandatory */
71158 atomic_t _count; /* mandatory */
71159 slobidx_t units; /* free units left in page */
71160 - unsigned long pad[2];
71161 + unsigned long pad[1];
71162 + unsigned long size; /* size when >=PAGE_SIZE */
71163 slob_t *free; /* first free slob_t in page */
71164 struct list_head list; /* linked list of free pages */
71165 };
71166 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
71167 */
71168 static inline int is_slob_page(struct slob_page *sp)
71169 {
71170 - return PageSlab((struct page *)sp);
71171 + return PageSlab((struct page *)sp) && !sp->size;
71172 }
71173
71174 static inline void set_slob_page(struct slob_page *sp)
71175 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
71176
71177 static inline struct slob_page *slob_page(const void *addr)
71178 {
71179 - return (struct slob_page *)virt_to_page(addr);
71180 + return (struct slob_page *)virt_to_head_page(addr);
71181 }
71182
71183 /*
71184 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
71185 /*
71186 * Return the size of a slob block.
71187 */
71188 -static slobidx_t slob_units(slob_t *s)
71189 +static slobidx_t slob_units(const slob_t *s)
71190 {
71191 if (s->units > 0)
71192 return s->units;
71193 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
71194 /*
71195 * Return the next free slob block pointer after this one.
71196 */
71197 -static slob_t *slob_next(slob_t *s)
71198 +static slob_t *slob_next(const slob_t *s)
71199 {
71200 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
71201 slobidx_t next;
71202 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
71203 /*
71204 * Returns true if s is the last free block in its page.
71205 */
71206 -static int slob_last(slob_t *s)
71207 +static int slob_last(const slob_t *s)
71208 {
71209 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
71210 }
71211 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
71212 if (!page)
71213 return NULL;
71214
71215 + set_slob_page(page);
71216 return page_address(page);
71217 }
71218
71219 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
71220 if (!b)
71221 return NULL;
71222 sp = slob_page(b);
71223 - set_slob_page(sp);
71224
71225 spin_lock_irqsave(&slob_lock, flags);
71226 sp->units = SLOB_UNITS(PAGE_SIZE);
71227 sp->free = b;
71228 + sp->size = 0;
71229 INIT_LIST_HEAD(&sp->list);
71230 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
71231 set_slob_page_free(sp, slob_list);
71232 @@ -476,10 +479,9 @@ out:
71233 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
71234 */
71235
71236 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71237 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
71238 {
71239 - unsigned int *m;
71240 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71241 + slob_t *m;
71242 void *ret;
71243
71244 gfp &= gfp_allowed_mask;
71245 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71246
71247 if (!m)
71248 return NULL;
71249 - *m = size;
71250 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
71251 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
71252 + m[0].units = size;
71253 + m[1].units = align;
71254 ret = (void *)m + align;
71255
71256 trace_kmalloc_node(_RET_IP_, ret,
71257 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71258 gfp |= __GFP_COMP;
71259 ret = slob_new_pages(gfp, order, node);
71260 if (ret) {
71261 - struct page *page;
71262 - page = virt_to_page(ret);
71263 - page->private = size;
71264 + struct slob_page *sp;
71265 + sp = slob_page(ret);
71266 + sp->size = size;
71267 }
71268
71269 trace_kmalloc_node(_RET_IP_, ret,
71270 size, PAGE_SIZE << order, gfp, node);
71271 }
71272
71273 - kmemleak_alloc(ret, size, 1, gfp);
71274 + return ret;
71275 +}
71276 +
71277 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71278 +{
71279 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71280 + void *ret = __kmalloc_node_align(size, gfp, node, align);
71281 +
71282 + if (!ZERO_OR_NULL_PTR(ret))
71283 + kmemleak_alloc(ret, size, 1, gfp);
71284 return ret;
71285 }
71286 EXPORT_SYMBOL(__kmalloc_node);
71287 @@ -533,13 +547,92 @@ void kfree(const void *block)
71288 sp = slob_page(block);
71289 if (is_slob_page(sp)) {
71290 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71291 - unsigned int *m = (unsigned int *)(block - align);
71292 - slob_free(m, *m + align);
71293 - } else
71294 + slob_t *m = (slob_t *)(block - align);
71295 + slob_free(m, m[0].units + align);
71296 + } else {
71297 + clear_slob_page(sp);
71298 + free_slob_page(sp);
71299 + sp->size = 0;
71300 put_page(&sp->page);
71301 + }
71302 }
71303 EXPORT_SYMBOL(kfree);
71304
71305 +void check_object_size(const void *ptr, unsigned long n, bool to)
71306 +{
71307 +
71308 +#ifdef CONFIG_PAX_USERCOPY
71309 + struct slob_page *sp;
71310 + const slob_t *free;
71311 + const void *base;
71312 + unsigned long flags;
71313 + const char *type;
71314 +
71315 + if (!n)
71316 + return;
71317 +
71318 + type = "<null>";
71319 + if (ZERO_OR_NULL_PTR(ptr))
71320 + goto report;
71321 +
71322 + if (!virt_addr_valid(ptr))
71323 + return;
71324 +
71325 + type = "<process stack>";
71326 + sp = slob_page(ptr);
71327 + if (!PageSlab((struct page *)sp)) {
71328 + if (object_is_on_stack(ptr, n) == -1)
71329 + goto report;
71330 + return;
71331 + }
71332 +
71333 + type = "<slob>";
71334 + if (sp->size) {
71335 + base = page_address(&sp->page);
71336 + if (base <= ptr && n <= sp->size - (ptr - base))
71337 + return;
71338 + goto report;
71339 + }
71340 +
71341 + /* some tricky double walking to find the chunk */
71342 + spin_lock_irqsave(&slob_lock, flags);
71343 + base = (void *)((unsigned long)ptr & PAGE_MASK);
71344 + free = sp->free;
71345 +
71346 + while (!slob_last(free) && (void *)free <= ptr) {
71347 + base = free + slob_units(free);
71348 + free = slob_next(free);
71349 + }
71350 +
71351 + while (base < (void *)free) {
71352 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
71353 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
71354 + int offset;
71355 +
71356 + if (ptr < base + align)
71357 + break;
71358 +
71359 + offset = ptr - base - align;
71360 + if (offset >= m) {
71361 + base += size;
71362 + continue;
71363 + }
71364 +
71365 + if (n > m - offset)
71366 + break;
71367 +
71368 + spin_unlock_irqrestore(&slob_lock, flags);
71369 + return;
71370 + }
71371 +
71372 + spin_unlock_irqrestore(&slob_lock, flags);
71373 +report:
71374 + pax_report_usercopy(ptr, n, to, type);
71375 +#endif
71376 +
71377 +}
71378 +EXPORT_SYMBOL(check_object_size);
71379 +
71380 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
71381 size_t ksize(const void *block)
71382 {
71383 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
71384 sp = slob_page(block);
71385 if (is_slob_page(sp)) {
71386 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71387 - unsigned int *m = (unsigned int *)(block - align);
71388 - return SLOB_UNITS(*m) * SLOB_UNIT;
71389 + slob_t *m = (slob_t *)(block - align);
71390 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
71391 } else
71392 - return sp->page.private;
71393 + return sp->size;
71394 }
71395 EXPORT_SYMBOL(ksize);
71396
71397 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71398 {
71399 struct kmem_cache *c;
71400
71401 +#ifdef CONFIG_PAX_USERCOPY
71402 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
71403 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
71404 +#else
71405 c = slob_alloc(sizeof(struct kmem_cache),
71406 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
71407 +#endif
71408
71409 if (c) {
71410 c->name = name;
71411 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
71412
71413 lockdep_trace_alloc(flags);
71414
71415 +#ifdef CONFIG_PAX_USERCOPY
71416 + b = __kmalloc_node_align(c->size, flags, node, c->align);
71417 +#else
71418 if (c->size < PAGE_SIZE) {
71419 b = slob_alloc(c->size, flags, c->align, node);
71420 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71421 SLOB_UNITS(c->size) * SLOB_UNIT,
71422 flags, node);
71423 } else {
71424 + struct slob_page *sp;
71425 +
71426 b = slob_new_pages(flags, get_order(c->size), node);
71427 + sp = slob_page(b);
71428 + sp->size = c->size;
71429 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71430 PAGE_SIZE << get_order(c->size),
71431 flags, node);
71432 }
71433 +#endif
71434
71435 if (c->ctor)
71436 c->ctor(b);
71437 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
71438
71439 static void __kmem_cache_free(void *b, int size)
71440 {
71441 - if (size < PAGE_SIZE)
71442 + struct slob_page *sp = slob_page(b);
71443 +
71444 + if (is_slob_page(sp))
71445 slob_free(b, size);
71446 - else
71447 + else {
71448 + clear_slob_page(sp);
71449 + free_slob_page(sp);
71450 + sp->size = 0;
71451 slob_free_pages(b, get_order(size));
71452 + }
71453 }
71454
71455 static void kmem_rcu_free(struct rcu_head *head)
71456 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
71457
71458 void kmem_cache_free(struct kmem_cache *c, void *b)
71459 {
71460 + int size = c->size;
71461 +
71462 +#ifdef CONFIG_PAX_USERCOPY
71463 + if (size + c->align < PAGE_SIZE) {
71464 + size += c->align;
71465 + b -= c->align;
71466 + }
71467 +#endif
71468 +
71469 kmemleak_free_recursive(b, c->flags);
71470 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
71471 struct slob_rcu *slob_rcu;
71472 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
71473 - slob_rcu->size = c->size;
71474 + slob_rcu = b + (size - sizeof(struct slob_rcu));
71475 + slob_rcu->size = size;
71476 call_rcu(&slob_rcu->head, kmem_rcu_free);
71477 } else {
71478 - __kmem_cache_free(b, c->size);
71479 + __kmem_cache_free(b, size);
71480 }
71481
71482 +#ifdef CONFIG_PAX_USERCOPY
71483 + trace_kfree(_RET_IP_, b);
71484 +#else
71485 trace_kmem_cache_free(_RET_IP_, b);
71486 +#endif
71487 +
71488 }
71489 EXPORT_SYMBOL(kmem_cache_free);
71490
71491 diff --git a/mm/slub.c b/mm/slub.c
71492 index 1a919f0..1739c9b 100644
71493 --- a/mm/slub.c
71494 +++ b/mm/slub.c
71495 @@ -208,7 +208,7 @@ struct track {
71496
71497 enum track_item { TRACK_ALLOC, TRACK_FREE };
71498
71499 -#ifdef CONFIG_SYSFS
71500 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71501 static int sysfs_slab_add(struct kmem_cache *);
71502 static int sysfs_slab_alias(struct kmem_cache *, const char *);
71503 static void sysfs_slab_remove(struct kmem_cache *);
71504 @@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
71505 if (!t->addr)
71506 return;
71507
71508 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
71509 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
71510 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
71511 #ifdef CONFIG_STACKTRACE
71512 {
71513 @@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
71514
71515 page = virt_to_head_page(x);
71516
71517 + BUG_ON(!PageSlab(page));
71518 +
71519 slab_free(s, page, x, _RET_IP_);
71520
71521 trace_kmem_cache_free(_RET_IP_, x);
71522 @@ -2592,7 +2594,7 @@ static int slub_min_objects;
71523 * Merge control. If this is set then no merging of slab caches will occur.
71524 * (Could be removed. This was introduced to pacify the merge skeptics.)
71525 */
71526 -static int slub_nomerge;
71527 +static int slub_nomerge = 1;
71528
71529 /*
71530 * Calculate the order of allocation given an slab object size.
71531 @@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
71532 else
71533 s->cpu_partial = 30;
71534
71535 - s->refcount = 1;
71536 + atomic_set(&s->refcount, 1);
71537 #ifdef CONFIG_NUMA
71538 s->remote_node_defrag_ratio = 1000;
71539 #endif
71540 @@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
71541 void kmem_cache_destroy(struct kmem_cache *s)
71542 {
71543 down_write(&slub_lock);
71544 - s->refcount--;
71545 - if (!s->refcount) {
71546 + if (atomic_dec_and_test(&s->refcount)) {
71547 list_del(&s->list);
71548 up_write(&slub_lock);
71549 if (kmem_cache_close(s)) {
71550 @@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
71551 EXPORT_SYMBOL(__kmalloc_node);
71552 #endif
71553
71554 +void check_object_size(const void *ptr, unsigned long n, bool to)
71555 +{
71556 +
71557 +#ifdef CONFIG_PAX_USERCOPY
71558 + struct page *page;
71559 + struct kmem_cache *s = NULL;
71560 + unsigned long offset;
71561 + const char *type;
71562 +
71563 + if (!n)
71564 + return;
71565 +
71566 + type = "<null>";
71567 + if (ZERO_OR_NULL_PTR(ptr))
71568 + goto report;
71569 +
71570 + if (!virt_addr_valid(ptr))
71571 + return;
71572 +
71573 + page = virt_to_head_page(ptr);
71574 +
71575 + type = "<process stack>";
71576 + if (!PageSlab(page)) {
71577 + if (object_is_on_stack(ptr, n) == -1)
71578 + goto report;
71579 + return;
71580 + }
71581 +
71582 + s = page->slab;
71583 + type = s->name;
71584 + if (!(s->flags & SLAB_USERCOPY))
71585 + goto report;
71586 +
71587 + offset = (ptr - page_address(page)) % s->size;
71588 + if (offset <= s->objsize && n <= s->objsize - offset)
71589 + return;
71590 +
71591 +report:
71592 + pax_report_usercopy(ptr, n, to, type);
71593 +#endif
71594 +
71595 +}
71596 +EXPORT_SYMBOL(check_object_size);
71597 +
71598 size_t ksize(const void *object)
71599 {
71600 struct page *page;
71601 @@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
71602 int node;
71603
71604 list_add(&s->list, &slab_caches);
71605 - s->refcount = -1;
71606 + atomic_set(&s->refcount, -1);
71607
71608 for_each_node_state(node, N_NORMAL_MEMORY) {
71609 struct kmem_cache_node *n = get_node(s, node);
71610 @@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void)
71611
71612 /* Caches that are not of the two-to-the-power-of size */
71613 if (KMALLOC_MIN_SIZE <= 32) {
71614 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
71615 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
71616 caches++;
71617 }
71618
71619 if (KMALLOC_MIN_SIZE <= 64) {
71620 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
71621 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
71622 caches++;
71623 }
71624
71625 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71626 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
71627 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
71628 caches++;
71629 }
71630
71631 @@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s)
71632 /*
71633 * We may have set a slab to be unmergeable during bootstrap.
71634 */
71635 - if (s->refcount < 0)
71636 + if (atomic_read(&s->refcount) < 0)
71637 return 1;
71638
71639 return 0;
71640 @@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71641 down_write(&slub_lock);
71642 s = find_mergeable(size, align, flags, name, ctor);
71643 if (s) {
71644 - s->refcount++;
71645 + atomic_inc(&s->refcount);
71646 /*
71647 * Adjust the object sizes so that we clear
71648 * the complete object on kzalloc.
71649 @@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71650 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
71651
71652 if (sysfs_slab_alias(s, name)) {
71653 - s->refcount--;
71654 + atomic_dec(&s->refcount);
71655 goto err;
71656 }
71657 up_write(&slub_lock);
71658 @@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
71659 }
71660 #endif
71661
71662 -#ifdef CONFIG_SYSFS
71663 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71664 static int count_inuse(struct page *page)
71665 {
71666 return page->inuse;
71667 @@ -4410,12 +4455,12 @@ static void resiliency_test(void)
71668 validate_slab_cache(kmalloc_caches[9]);
71669 }
71670 #else
71671 -#ifdef CONFIG_SYSFS
71672 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71673 static void resiliency_test(void) {};
71674 #endif
71675 #endif
71676
71677 -#ifdef CONFIG_SYSFS
71678 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71679 enum slab_stat_type {
71680 SL_ALL, /* All slabs */
71681 SL_PARTIAL, /* Only partially allocated slabs */
71682 @@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor);
71683
71684 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71685 {
71686 - return sprintf(buf, "%d\n", s->refcount - 1);
71687 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71688 }
71689 SLAB_ATTR_RO(aliases);
71690
71691 @@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s)
71692 return name;
71693 }
71694
71695 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71696 static int sysfs_slab_add(struct kmem_cache *s)
71697 {
71698 int err;
71699 @@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
71700 kobject_del(&s->kobj);
71701 kobject_put(&s->kobj);
71702 }
71703 +#endif
71704
71705 /*
71706 * Need to buffer aliases during bootup until sysfs becomes
71707 @@ -5298,6 +5345,7 @@ struct saved_alias {
71708
71709 static struct saved_alias *alias_list;
71710
71711 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71712 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71713 {
71714 struct saved_alias *al;
71715 @@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71716 alias_list = al;
71717 return 0;
71718 }
71719 +#endif
71720
71721 static int __init slab_sysfs_init(void)
71722 {
71723 diff --git a/mm/swap.c b/mm/swap.c
71724 index 55b266d..a532537 100644
71725 --- a/mm/swap.c
71726 +++ b/mm/swap.c
71727 @@ -31,6 +31,7 @@
71728 #include <linux/backing-dev.h>
71729 #include <linux/memcontrol.h>
71730 #include <linux/gfp.h>
71731 +#include <linux/hugetlb.h>
71732
71733 #include "internal.h"
71734
71735 @@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
71736
71737 __page_cache_release(page);
71738 dtor = get_compound_page_dtor(page);
71739 + if (!PageHuge(page))
71740 + BUG_ON(dtor != free_compound_page);
71741 (*dtor)(page);
71742 }
71743
71744 diff --git a/mm/swapfile.c b/mm/swapfile.c
71745 index b1cd120..aaae885 100644
71746 --- a/mm/swapfile.c
71747 +++ b/mm/swapfile.c
71748 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
71749
71750 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
71751 /* Activity counter to indicate that a swapon or swapoff has occurred */
71752 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
71753 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
71754
71755 static inline unsigned char swap_count(unsigned char ent)
71756 {
71757 @@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
71758 }
71759 filp_close(swap_file, NULL);
71760 err = 0;
71761 - atomic_inc(&proc_poll_event);
71762 + atomic_inc_unchecked(&proc_poll_event);
71763 wake_up_interruptible(&proc_poll_wait);
71764
71765 out_dput:
71766 @@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
71767
71768 poll_wait(file, &proc_poll_wait, wait);
71769
71770 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
71771 - seq->poll_event = atomic_read(&proc_poll_event);
71772 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
71773 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71774 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71775 }
71776
71777 @@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file)
71778 return ret;
71779
71780 seq = file->private_data;
71781 - seq->poll_event = atomic_read(&proc_poll_event);
71782 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71783 return 0;
71784 }
71785
71786 @@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
71787 (p->flags & SWP_DISCARDABLE) ? "D" : "");
71788
71789 mutex_unlock(&swapon_mutex);
71790 - atomic_inc(&proc_poll_event);
71791 + atomic_inc_unchecked(&proc_poll_event);
71792 wake_up_interruptible(&proc_poll_wait);
71793
71794 if (S_ISREG(inode->i_mode))
71795 diff --git a/mm/util.c b/mm/util.c
71796 index 136ac4f..5117eef 100644
71797 --- a/mm/util.c
71798 +++ b/mm/util.c
71799 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71800 * allocated buffer. Use this if you don't want to free the buffer immediately
71801 * like, for example, with RCU.
71802 */
71803 +#undef __krealloc
71804 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71805 {
71806 void *ret;
71807 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71808 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71809 * %NULL pointer, the object pointed to is freed.
71810 */
71811 +#undef krealloc
71812 void *krealloc(const void *p, size_t new_size, gfp_t flags)
71813 {
71814 void *ret;
71815 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
71816 void arch_pick_mmap_layout(struct mm_struct *mm)
71817 {
71818 mm->mmap_base = TASK_UNMAPPED_BASE;
71819 +
71820 +#ifdef CONFIG_PAX_RANDMMAP
71821 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71822 + mm->mmap_base += mm->delta_mmap;
71823 +#endif
71824 +
71825 mm->get_unmapped_area = arch_get_unmapped_area;
71826 mm->unmap_area = arch_unmap_area;
71827 }
71828 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
71829 index 27be2f0..0aef2c2 100644
71830 --- a/mm/vmalloc.c
71831 +++ b/mm/vmalloc.c
71832 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
71833
71834 pte = pte_offset_kernel(pmd, addr);
71835 do {
71836 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71837 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71838 +
71839 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71840 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71841 + BUG_ON(!pte_exec(*pte));
71842 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71843 + continue;
71844 + }
71845 +#endif
71846 +
71847 + {
71848 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71849 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71850 + }
71851 } while (pte++, addr += PAGE_SIZE, addr != end);
71852 }
71853
71854 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71855 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71856 {
71857 pte_t *pte;
71858 + int ret = -ENOMEM;
71859
71860 /*
71861 * nr is a running index into the array which helps higher level
71862 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71863 pte = pte_alloc_kernel(pmd, addr);
71864 if (!pte)
71865 return -ENOMEM;
71866 +
71867 + pax_open_kernel();
71868 do {
71869 struct page *page = pages[*nr];
71870
71871 - if (WARN_ON(!pte_none(*pte)))
71872 - return -EBUSY;
71873 - if (WARN_ON(!page))
71874 - return -ENOMEM;
71875 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71876 + if (pgprot_val(prot) & _PAGE_NX)
71877 +#endif
71878 +
71879 + if (WARN_ON(!pte_none(*pte))) {
71880 + ret = -EBUSY;
71881 + goto out;
71882 + }
71883 + if (WARN_ON(!page)) {
71884 + ret = -ENOMEM;
71885 + goto out;
71886 + }
71887 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71888 (*nr)++;
71889 } while (pte++, addr += PAGE_SIZE, addr != end);
71890 - return 0;
71891 + ret = 0;
71892 +out:
71893 + pax_close_kernel();
71894 + return ret;
71895 }
71896
71897 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71898 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
71899 * and fall back on vmalloc() if that fails. Others
71900 * just put it in the vmalloc space.
71901 */
71902 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71903 +#ifdef CONFIG_MODULES
71904 +#ifdef MODULES_VADDR
71905 unsigned long addr = (unsigned long)x;
71906 if (addr >= MODULES_VADDR && addr < MODULES_END)
71907 return 1;
71908 #endif
71909 +
71910 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71911 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71912 + return 1;
71913 +#endif
71914 +
71915 +#endif
71916 +
71917 return is_vmalloc_addr(x);
71918 }
71919
71920 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
71921
71922 if (!pgd_none(*pgd)) {
71923 pud_t *pud = pud_offset(pgd, addr);
71924 +#ifdef CONFIG_X86
71925 + if (!pud_large(*pud))
71926 +#endif
71927 if (!pud_none(*pud)) {
71928 pmd_t *pmd = pmd_offset(pud, addr);
71929 +#ifdef CONFIG_X86
71930 + if (!pmd_large(*pmd))
71931 +#endif
71932 if (!pmd_none(*pmd)) {
71933 pte_t *ptep, pte;
71934
71935 @@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
71936 struct vm_struct *area;
71937
71938 BUG_ON(in_interrupt());
71939 +
71940 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71941 + if (flags & VM_KERNEXEC) {
71942 + if (start != VMALLOC_START || end != VMALLOC_END)
71943 + return NULL;
71944 + start = (unsigned long)MODULES_EXEC_VADDR;
71945 + end = (unsigned long)MODULES_EXEC_END;
71946 + }
71947 +#endif
71948 +
71949 if (flags & VM_IOREMAP) {
71950 int bit = fls(size);
71951
71952 @@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
71953 if (count > totalram_pages)
71954 return NULL;
71955
71956 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71957 + if (!(pgprot_val(prot) & _PAGE_NX))
71958 + flags |= VM_KERNEXEC;
71959 +#endif
71960 +
71961 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71962 __builtin_return_address(0));
71963 if (!area)
71964 @@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
71965 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71966 goto fail;
71967
71968 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71969 + if (!(pgprot_val(prot) & _PAGE_NX))
71970 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71971 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71972 + else
71973 +#endif
71974 +
71975 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71976 start, end, node, gfp_mask, caller);
71977 if (!area)
71978 @@ -1679,6 +1741,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
71979 gfp_mask, prot, node, caller);
71980 }
71981
71982 +#undef __vmalloc
71983 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71984 {
71985 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71986 @@ -1702,6 +1765,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
71987 * For tight control over page level allocator and protection flags
71988 * use __vmalloc() instead.
71989 */
71990 +#undef vmalloc
71991 void *vmalloc(unsigned long size)
71992 {
71993 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71994 @@ -1718,6 +1782,7 @@ EXPORT_SYMBOL(vmalloc);
71995 * For tight control over page level allocator and protection flags
71996 * use __vmalloc() instead.
71997 */
71998 +#undef vzalloc
71999 void *vzalloc(unsigned long size)
72000 {
72001 return __vmalloc_node_flags(size, -1,
72002 @@ -1732,6 +1797,7 @@ EXPORT_SYMBOL(vzalloc);
72003 * The resulting memory area is zeroed so it can be mapped to userspace
72004 * without leaking data.
72005 */
72006 +#undef vmalloc_user
72007 void *vmalloc_user(unsigned long size)
72008 {
72009 struct vm_struct *area;
72010 @@ -1759,6 +1825,7 @@ EXPORT_SYMBOL(vmalloc_user);
72011 * For tight control over page level allocator and protection flags
72012 * use __vmalloc() instead.
72013 */
72014 +#undef vmalloc_node
72015 void *vmalloc_node(unsigned long size, int node)
72016 {
72017 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
72018 @@ -1778,6 +1845,7 @@ EXPORT_SYMBOL(vmalloc_node);
72019 * For tight control over page level allocator and protection flags
72020 * use __vmalloc_node() instead.
72021 */
72022 +#undef vzalloc_node
72023 void *vzalloc_node(unsigned long size, int node)
72024 {
72025 return __vmalloc_node_flags(size, node,
72026 @@ -1800,10 +1868,10 @@ EXPORT_SYMBOL(vzalloc_node);
72027 * For tight control over page level allocator and protection flags
72028 * use __vmalloc() instead.
72029 */
72030 -
72031 +#undef vmalloc_exec
72032 void *vmalloc_exec(unsigned long size)
72033 {
72034 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
72035 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
72036 -1, __builtin_return_address(0));
72037 }
72038
72039 @@ -1822,6 +1890,7 @@ void *vmalloc_exec(unsigned long size)
72040 * Allocate enough 32bit PA addressable pages to cover @size from the
72041 * page level allocator and map them into contiguous kernel virtual space.
72042 */
72043 +#undef vmalloc_32
72044 void *vmalloc_32(unsigned long size)
72045 {
72046 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
72047 @@ -1836,6 +1905,7 @@ EXPORT_SYMBOL(vmalloc_32);
72048 * The resulting memory area is 32bit addressable and zeroed so it can be
72049 * mapped to userspace without leaking data.
72050 */
72051 +#undef vmalloc_32_user
72052 void *vmalloc_32_user(unsigned long size)
72053 {
72054 struct vm_struct *area;
72055 @@ -2098,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
72056 unsigned long uaddr = vma->vm_start;
72057 unsigned long usize = vma->vm_end - vma->vm_start;
72058
72059 + BUG_ON(vma->vm_mirror);
72060 +
72061 if ((PAGE_SIZE-1) & (unsigned long)addr)
72062 return -EINVAL;
72063
72064 diff --git a/mm/vmstat.c b/mm/vmstat.c
72065 index 8fd603b..cf0d930 100644
72066 --- a/mm/vmstat.c
72067 +++ b/mm/vmstat.c
72068 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
72069 *
72070 * vm_stat contains the global counters
72071 */
72072 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
72073 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
72074 EXPORT_SYMBOL(vm_stat);
72075
72076 #ifdef CONFIG_SMP
72077 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
72078 v = p->vm_stat_diff[i];
72079 p->vm_stat_diff[i] = 0;
72080 local_irq_restore(flags);
72081 - atomic_long_add(v, &zone->vm_stat[i]);
72082 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
72083 global_diff[i] += v;
72084 #ifdef CONFIG_NUMA
72085 /* 3 seconds idle till flush */
72086 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
72087
72088 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
72089 if (global_diff[i])
72090 - atomic_long_add(global_diff[i], &vm_stat[i]);
72091 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
72092 }
72093
72094 #endif
72095 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
72096 start_cpu_timer(cpu);
72097 #endif
72098 #ifdef CONFIG_PROC_FS
72099 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
72100 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
72101 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
72102 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
72103 + {
72104 + mode_t gr_mode = S_IRUGO;
72105 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
72106 + gr_mode = S_IRUSR;
72107 +#endif
72108 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
72109 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
72110 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72111 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
72112 +#else
72113 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
72114 +#endif
72115 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
72116 + }
72117 #endif
72118 return 0;
72119 }
72120 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
72121 index 5471628..cef8398 100644
72122 --- a/net/8021q/vlan.c
72123 +++ b/net/8021q/vlan.c
72124 @@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
72125 err = -EPERM;
72126 if (!capable(CAP_NET_ADMIN))
72127 break;
72128 - if ((args.u.name_type >= 0) &&
72129 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
72130 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
72131 struct vlan_net *vn;
72132
72133 vn = net_generic(net, vlan_net_id);
72134 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
72135 index fdfdb57..38d368c 100644
72136 --- a/net/9p/trans_fd.c
72137 +++ b/net/9p/trans_fd.c
72138 @@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
72139 oldfs = get_fs();
72140 set_fs(get_ds());
72141 /* The cast to a user pointer is valid due to the set_fs() */
72142 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
72143 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
72144 set_fs(oldfs);
72145
72146 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
72147 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
72148 index f41f026..fe76ea8 100644
72149 --- a/net/atm/atm_misc.c
72150 +++ b/net/atm/atm_misc.c
72151 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
72152 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
72153 return 1;
72154 atm_return(vcc, truesize);
72155 - atomic_inc(&vcc->stats->rx_drop);
72156 + atomic_inc_unchecked(&vcc->stats->rx_drop);
72157 return 0;
72158 }
72159 EXPORT_SYMBOL(atm_charge);
72160 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
72161 }
72162 }
72163 atm_return(vcc, guess);
72164 - atomic_inc(&vcc->stats->rx_drop);
72165 + atomic_inc_unchecked(&vcc->stats->rx_drop);
72166 return NULL;
72167 }
72168 EXPORT_SYMBOL(atm_alloc_charge);
72169 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
72170
72171 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
72172 {
72173 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
72174 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
72175 __SONET_ITEMS
72176 #undef __HANDLE_ITEM
72177 }
72178 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
72179
72180 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
72181 {
72182 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
72183 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
72184 __SONET_ITEMS
72185 #undef __HANDLE_ITEM
72186 }
72187 diff --git a/net/atm/lec.h b/net/atm/lec.h
72188 index dfc0719..47c5322 100644
72189 --- a/net/atm/lec.h
72190 +++ b/net/atm/lec.h
72191 @@ -48,7 +48,7 @@ struct lane2_ops {
72192 const u8 *tlvs, u32 sizeoftlvs);
72193 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
72194 const u8 *tlvs, u32 sizeoftlvs);
72195 -};
72196 +} __no_const;
72197
72198 /*
72199 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
72200 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
72201 index 0919a88..a23d54e 100644
72202 --- a/net/atm/mpc.h
72203 +++ b/net/atm/mpc.h
72204 @@ -33,7 +33,7 @@ struct mpoa_client {
72205 struct mpc_parameters parameters; /* parameters for this client */
72206
72207 const struct net_device_ops *old_ops;
72208 - struct net_device_ops new_ops;
72209 + net_device_ops_no_const new_ops;
72210 };
72211
72212
72213 diff --git a/net/atm/proc.c b/net/atm/proc.c
72214 index 0d020de..011c7bb 100644
72215 --- a/net/atm/proc.c
72216 +++ b/net/atm/proc.c
72217 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
72218 const struct k_atm_aal_stats *stats)
72219 {
72220 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
72221 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
72222 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
72223 - atomic_read(&stats->rx_drop));
72224 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
72225 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
72226 + atomic_read_unchecked(&stats->rx_drop));
72227 }
72228
72229 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
72230 diff --git a/net/atm/resources.c b/net/atm/resources.c
72231 index 23f45ce..c748f1a 100644
72232 --- a/net/atm/resources.c
72233 +++ b/net/atm/resources.c
72234 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
72235 static void copy_aal_stats(struct k_atm_aal_stats *from,
72236 struct atm_aal_stats *to)
72237 {
72238 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
72239 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
72240 __AAL_STAT_ITEMS
72241 #undef __HANDLE_ITEM
72242 }
72243 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
72244 static void subtract_aal_stats(struct k_atm_aal_stats *from,
72245 struct atm_aal_stats *to)
72246 {
72247 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
72248 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
72249 __AAL_STAT_ITEMS
72250 #undef __HANDLE_ITEM
72251 }
72252 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
72253 index 3512e25..2b33401 100644
72254 --- a/net/batman-adv/bat_iv_ogm.c
72255 +++ b/net/batman-adv/bat_iv_ogm.c
72256 @@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
72257
72258 /* change sequence number to network order */
72259 batman_ogm_packet->seqno =
72260 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
72261 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
72262
72263 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
72264 batman_ogm_packet->tt_crc = htons((uint16_t)
72265 @@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
72266 else
72267 batman_ogm_packet->gw_flags = NO_FLAGS;
72268
72269 - atomic_inc(&hard_iface->seqno);
72270 + atomic_inc_unchecked(&hard_iface->seqno);
72271
72272 slide_own_bcast_window(hard_iface);
72273 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
72274 @@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
72275 return;
72276
72277 /* could be changed by schedule_own_packet() */
72278 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
72279 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
72280
72281 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
72282
72283 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
72284 index 7704df4..beb4e16 100644
72285 --- a/net/batman-adv/hard-interface.c
72286 +++ b/net/batman-adv/hard-interface.c
72287 @@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
72288 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
72289 dev_add_pack(&hard_iface->batman_adv_ptype);
72290
72291 - atomic_set(&hard_iface->seqno, 1);
72292 - atomic_set(&hard_iface->frag_seqno, 1);
72293 + atomic_set_unchecked(&hard_iface->seqno, 1);
72294 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
72295 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
72296 hard_iface->net_dev->name);
72297
72298 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
72299 index f9cc957..efd9dae 100644
72300 --- a/net/batman-adv/soft-interface.c
72301 +++ b/net/batman-adv/soft-interface.c
72302 @@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
72303
72304 /* set broadcast sequence number */
72305 bcast_packet->seqno =
72306 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
72307 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
72308
72309 add_bcast_packet_to_list(bat_priv, skb, 1);
72310
72311 @@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
72312 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
72313
72314 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
72315 - atomic_set(&bat_priv->bcast_seqno, 1);
72316 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
72317 atomic_set(&bat_priv->ttvn, 0);
72318 atomic_set(&bat_priv->tt_local_changes, 0);
72319 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
72320 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
72321 index ab8d0fe..ceba3fd 100644
72322 --- a/net/batman-adv/types.h
72323 +++ b/net/batman-adv/types.h
72324 @@ -38,8 +38,8 @@ struct hard_iface {
72325 int16_t if_num;
72326 char if_status;
72327 struct net_device *net_dev;
72328 - atomic_t seqno;
72329 - atomic_t frag_seqno;
72330 + atomic_unchecked_t seqno;
72331 + atomic_unchecked_t frag_seqno;
72332 unsigned char *packet_buff;
72333 int packet_len;
72334 struct kobject *hardif_obj;
72335 @@ -154,7 +154,7 @@ struct bat_priv {
72336 atomic_t orig_interval; /* uint */
72337 atomic_t hop_penalty; /* uint */
72338 atomic_t log_level; /* uint */
72339 - atomic_t bcast_seqno;
72340 + atomic_unchecked_t bcast_seqno;
72341 atomic_t bcast_queue_left;
72342 atomic_t batman_queue_left;
72343 atomic_t ttvn; /* translation table version number */
72344 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
72345 index 07d1c1d..7e9bea9 100644
72346 --- a/net/batman-adv/unicast.c
72347 +++ b/net/batman-adv/unicast.c
72348 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
72349 frag1->flags = UNI_FRAG_HEAD | large_tail;
72350 frag2->flags = large_tail;
72351
72352 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
72353 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
72354 frag1->seqno = htons(seqno - 1);
72355 frag2->seqno = htons(seqno);
72356
72357 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
72358 index c1c597e..05ebb40 100644
72359 --- a/net/bluetooth/hci_conn.c
72360 +++ b/net/bluetooth/hci_conn.c
72361 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
72362 memset(&cp, 0, sizeof(cp));
72363
72364 cp.handle = cpu_to_le16(conn->handle);
72365 - memcpy(cp.ltk, ltk, sizeof(ltk));
72366 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
72367
72368 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
72369 }
72370 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
72371 index 17b5b1c..826d872 100644
72372 --- a/net/bluetooth/l2cap_core.c
72373 +++ b/net/bluetooth/l2cap_core.c
72374 @@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
72375 break;
72376
72377 case L2CAP_CONF_RFC:
72378 - if (olen == sizeof(rfc))
72379 - memcpy(&rfc, (void *)val, olen);
72380 + if (olen != sizeof(rfc))
72381 + break;
72382 +
72383 + memcpy(&rfc, (void *)val, olen);
72384
72385 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
72386 rfc.mode != chan->mode)
72387 @@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
72388
72389 switch (type) {
72390 case L2CAP_CONF_RFC:
72391 - if (olen == sizeof(rfc))
72392 - memcpy(&rfc, (void *)val, olen);
72393 + if (olen != sizeof(rfc))
72394 + break;
72395 +
72396 + memcpy(&rfc, (void *)val, olen);
72397 goto done;
72398 }
72399 }
72400 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
72401 index a5f4e57..910ee6d 100644
72402 --- a/net/bridge/br_multicast.c
72403 +++ b/net/bridge/br_multicast.c
72404 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
72405 nexthdr = ip6h->nexthdr;
72406 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
72407
72408 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
72409 + if (nexthdr != IPPROTO_ICMPV6)
72410 return 0;
72411
72412 /* Okay, we found ICMPv6 header */
72413 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
72414 index 5864cc4..121f3a3 100644
72415 --- a/net/bridge/netfilter/ebtables.c
72416 +++ b/net/bridge/netfilter/ebtables.c
72417 @@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
72418 tmp.valid_hooks = t->table->valid_hooks;
72419 }
72420 mutex_unlock(&ebt_mutex);
72421 - if (copy_to_user(user, &tmp, *len) != 0){
72422 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
72423 BUGPRINT("c2u Didn't work\n");
72424 ret = -EFAULT;
72425 break;
72426 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
72427 index a986280..13444a1 100644
72428 --- a/net/caif/caif_socket.c
72429 +++ b/net/caif/caif_socket.c
72430 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
72431 #ifdef CONFIG_DEBUG_FS
72432 struct debug_fs_counter {
72433 atomic_t caif_nr_socks;
72434 - atomic_t caif_sock_create;
72435 - atomic_t num_connect_req;
72436 - atomic_t num_connect_resp;
72437 - atomic_t num_connect_fail_resp;
72438 - atomic_t num_disconnect;
72439 - atomic_t num_remote_shutdown_ind;
72440 - atomic_t num_tx_flow_off_ind;
72441 - atomic_t num_tx_flow_on_ind;
72442 - atomic_t num_rx_flow_off;
72443 - atomic_t num_rx_flow_on;
72444 + atomic_unchecked_t caif_sock_create;
72445 + atomic_unchecked_t num_connect_req;
72446 + atomic_unchecked_t num_connect_resp;
72447 + atomic_unchecked_t num_connect_fail_resp;
72448 + atomic_unchecked_t num_disconnect;
72449 + atomic_unchecked_t num_remote_shutdown_ind;
72450 + atomic_unchecked_t num_tx_flow_off_ind;
72451 + atomic_unchecked_t num_tx_flow_on_ind;
72452 + atomic_unchecked_t num_rx_flow_off;
72453 + atomic_unchecked_t num_rx_flow_on;
72454 };
72455 static struct debug_fs_counter cnt;
72456 #define dbfs_atomic_inc(v) atomic_inc_return(v)
72457 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
72458 #define dbfs_atomic_dec(v) atomic_dec_return(v)
72459 #else
72460 #define dbfs_atomic_inc(v) 0
72461 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72462 atomic_read(&cf_sk->sk.sk_rmem_alloc),
72463 sk_rcvbuf_lowwater(cf_sk));
72464 set_rx_flow_off(cf_sk);
72465 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
72466 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72467 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72468 }
72469
72470 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72471 set_rx_flow_off(cf_sk);
72472 if (net_ratelimit())
72473 pr_debug("sending flow OFF due to rmem_schedule\n");
72474 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
72475 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72476 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72477 }
72478 skb->dev = NULL;
72479 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
72480 switch (flow) {
72481 case CAIF_CTRLCMD_FLOW_ON_IND:
72482 /* OK from modem to start sending again */
72483 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
72484 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
72485 set_tx_flow_on(cf_sk);
72486 cf_sk->sk.sk_state_change(&cf_sk->sk);
72487 break;
72488
72489 case CAIF_CTRLCMD_FLOW_OFF_IND:
72490 /* Modem asks us to shut up */
72491 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
72492 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
72493 set_tx_flow_off(cf_sk);
72494 cf_sk->sk.sk_state_change(&cf_sk->sk);
72495 break;
72496 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72497 /* We're now connected */
72498 caif_client_register_refcnt(&cf_sk->layer,
72499 cfsk_hold, cfsk_put);
72500 - dbfs_atomic_inc(&cnt.num_connect_resp);
72501 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
72502 cf_sk->sk.sk_state = CAIF_CONNECTED;
72503 set_tx_flow_on(cf_sk);
72504 cf_sk->sk.sk_state_change(&cf_sk->sk);
72505 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72506
72507 case CAIF_CTRLCMD_INIT_FAIL_RSP:
72508 /* Connect request failed */
72509 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
72510 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
72511 cf_sk->sk.sk_err = ECONNREFUSED;
72512 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
72513 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72514 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72515
72516 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
72517 /* Modem has closed this connection, or device is down. */
72518 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
72519 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
72520 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72521 cf_sk->sk.sk_err = ECONNRESET;
72522 set_rx_flow_on(cf_sk);
72523 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
72524 return;
72525
72526 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
72527 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
72528 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
72529 set_rx_flow_on(cf_sk);
72530 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
72531 }
72532 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
72533 /*ifindex = id of the interface.*/
72534 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
72535
72536 - dbfs_atomic_inc(&cnt.num_connect_req);
72537 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
72538 cf_sk->layer.receive = caif_sktrecv_cb;
72539
72540 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
72541 @@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
72542 spin_unlock_bh(&sk->sk_receive_queue.lock);
72543 sock->sk = NULL;
72544
72545 - dbfs_atomic_inc(&cnt.num_disconnect);
72546 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
72547
72548 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
72549 if (cf_sk->debugfs_socket_dir != NULL)
72550 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
72551 cf_sk->conn_req.protocol = protocol;
72552 /* Increase the number of sockets created. */
72553 dbfs_atomic_inc(&cnt.caif_nr_socks);
72554 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
72555 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
72556 #ifdef CONFIG_DEBUG_FS
72557 if (!IS_ERR(debugfsdir)) {
72558
72559 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
72560 index 5cf5222..6f704ad 100644
72561 --- a/net/caif/cfctrl.c
72562 +++ b/net/caif/cfctrl.c
72563 @@ -9,6 +9,7 @@
72564 #include <linux/stddef.h>
72565 #include <linux/spinlock.h>
72566 #include <linux/slab.h>
72567 +#include <linux/sched.h>
72568 #include <net/caif/caif_layer.h>
72569 #include <net/caif/cfpkt.h>
72570 #include <net/caif/cfctrl.h>
72571 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
72572 memset(&dev_info, 0, sizeof(dev_info));
72573 dev_info.id = 0xff;
72574 cfsrvl_init(&this->serv, 0, &dev_info, false);
72575 - atomic_set(&this->req_seq_no, 1);
72576 - atomic_set(&this->rsp_seq_no, 1);
72577 + atomic_set_unchecked(&this->req_seq_no, 1);
72578 + atomic_set_unchecked(&this->rsp_seq_no, 1);
72579 this->serv.layer.receive = cfctrl_recv;
72580 sprintf(this->serv.layer.name, "ctrl");
72581 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
72582 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
72583 struct cfctrl_request_info *req)
72584 {
72585 spin_lock_bh(&ctrl->info_list_lock);
72586 - atomic_inc(&ctrl->req_seq_no);
72587 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
72588 + atomic_inc_unchecked(&ctrl->req_seq_no);
72589 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
72590 list_add_tail(&req->list, &ctrl->list);
72591 spin_unlock_bh(&ctrl->info_list_lock);
72592 }
72593 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
72594 if (p != first)
72595 pr_warn("Requests are not received in order\n");
72596
72597 - atomic_set(&ctrl->rsp_seq_no,
72598 + atomic_set_unchecked(&ctrl->rsp_seq_no,
72599 p->sequence_no);
72600 list_del(&p->list);
72601 goto out;
72602 diff --git a/net/can/gw.c b/net/can/gw.c
72603 index 3d79b12..8de85fa 100644
72604 --- a/net/can/gw.c
72605 +++ b/net/can/gw.c
72606 @@ -96,7 +96,7 @@ struct cf_mod {
72607 struct {
72608 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
72609 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
72610 - } csumfunc;
72611 + } __no_const csumfunc;
72612 };
72613
72614
72615 diff --git a/net/compat.c b/net/compat.c
72616 index 6def90e..c6992fa 100644
72617 --- a/net/compat.c
72618 +++ b/net/compat.c
72619 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
72620 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72621 __get_user(kmsg->msg_flags, &umsg->msg_flags))
72622 return -EFAULT;
72623 - kmsg->msg_name = compat_ptr(tmp1);
72624 - kmsg->msg_iov = compat_ptr(tmp2);
72625 - kmsg->msg_control = compat_ptr(tmp3);
72626 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
72627 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
72628 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
72629 return 0;
72630 }
72631
72632 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72633
72634 if (kern_msg->msg_namelen) {
72635 if (mode == VERIFY_READ) {
72636 - int err = move_addr_to_kernel(kern_msg->msg_name,
72637 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
72638 kern_msg->msg_namelen,
72639 kern_address);
72640 if (err < 0)
72641 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72642 kern_msg->msg_name = NULL;
72643
72644 tot_len = iov_from_user_compat_to_kern(kern_iov,
72645 - (struct compat_iovec __user *)kern_msg->msg_iov,
72646 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
72647 kern_msg->msg_iovlen);
72648 if (tot_len >= 0)
72649 kern_msg->msg_iov = kern_iov;
72650 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72651
72652 #define CMSG_COMPAT_FIRSTHDR(msg) \
72653 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
72654 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
72655 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
72656 (struct compat_cmsghdr __user *)NULL)
72657
72658 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
72659 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
72660 (ucmlen) <= (unsigned long) \
72661 ((mhdr)->msg_controllen - \
72662 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
72663 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
72664
72665 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
72666 struct compat_cmsghdr __user *cmsg, int cmsg_len)
72667 {
72668 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
72669 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
72670 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
72671 msg->msg_controllen)
72672 return NULL;
72673 return (struct compat_cmsghdr __user *)ptr;
72674 @@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72675 {
72676 struct compat_timeval ctv;
72677 struct compat_timespec cts[3];
72678 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72679 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72680 struct compat_cmsghdr cmhdr;
72681 int cmlen;
72682
72683 @@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72684
72685 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72686 {
72687 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72688 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72689 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72690 int fdnum = scm->fp->count;
72691 struct file **fp = scm->fp->fp;
72692 @@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
72693 return -EFAULT;
72694 old_fs = get_fs();
72695 set_fs(KERNEL_DS);
72696 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72697 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72698 set_fs(old_fs);
72699
72700 return err;
72701 @@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
72702 len = sizeof(ktime);
72703 old_fs = get_fs();
72704 set_fs(KERNEL_DS);
72705 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72706 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72707 set_fs(old_fs);
72708
72709 if (!err) {
72710 @@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72711 case MCAST_JOIN_GROUP:
72712 case MCAST_LEAVE_GROUP:
72713 {
72714 - struct compat_group_req __user *gr32 = (void *)optval;
72715 + struct compat_group_req __user *gr32 = (void __user *)optval;
72716 struct group_req __user *kgr =
72717 compat_alloc_user_space(sizeof(struct group_req));
72718 u32 interface;
72719 @@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72720 case MCAST_BLOCK_SOURCE:
72721 case MCAST_UNBLOCK_SOURCE:
72722 {
72723 - struct compat_group_source_req __user *gsr32 = (void *)optval;
72724 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72725 struct group_source_req __user *kgsr = compat_alloc_user_space(
72726 sizeof(struct group_source_req));
72727 u32 interface;
72728 @@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72729 }
72730 case MCAST_MSFILTER:
72731 {
72732 - struct compat_group_filter __user *gf32 = (void *)optval;
72733 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72734 struct group_filter __user *kgf;
72735 u32 interface, fmode, numsrc;
72736
72737 @@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
72738 char __user *optval, int __user *optlen,
72739 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
72740 {
72741 - struct compat_group_filter __user *gf32 = (void *)optval;
72742 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72743 struct group_filter __user *kgf;
72744 int __user *koptlen;
72745 u32 interface, fmode, numsrc;
72746 diff --git a/net/core/datagram.c b/net/core/datagram.c
72747 index 68bbf9f..5ef0d12 100644
72748 --- a/net/core/datagram.c
72749 +++ b/net/core/datagram.c
72750 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
72751 }
72752
72753 kfree_skb(skb);
72754 - atomic_inc(&sk->sk_drops);
72755 + atomic_inc_unchecked(&sk->sk_drops);
72756 sk_mem_reclaim_partial(sk);
72757
72758 return err;
72759 diff --git a/net/core/dev.c b/net/core/dev.c
72760 index c56cacf..b28e35f 100644
72761 --- a/net/core/dev.c
72762 +++ b/net/core/dev.c
72763 @@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
72764 if (no_module && capable(CAP_NET_ADMIN))
72765 no_module = request_module("netdev-%s", name);
72766 if (no_module && capable(CAP_SYS_MODULE)) {
72767 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72768 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
72769 +#else
72770 if (!request_module("%s", name))
72771 pr_err("Loading kernel module for a network device "
72772 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
72773 "instead\n", name);
72774 +#endif
72775 }
72776 }
72777 EXPORT_SYMBOL(dev_load);
72778 @@ -1573,7 +1577,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72779 {
72780 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
72781 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
72782 - atomic_long_inc(&dev->rx_dropped);
72783 + atomic_long_inc_unchecked(&dev->rx_dropped);
72784 kfree_skb(skb);
72785 return NET_RX_DROP;
72786 }
72787 @@ -1583,7 +1587,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72788 nf_reset(skb);
72789
72790 if (unlikely(!is_skb_forwardable(dev, skb))) {
72791 - atomic_long_inc(&dev->rx_dropped);
72792 + atomic_long_inc_unchecked(&dev->rx_dropped);
72793 kfree_skb(skb);
72794 return NET_RX_DROP;
72795 }
72796 @@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
72797
72798 struct dev_gso_cb {
72799 void (*destructor)(struct sk_buff *skb);
72800 -};
72801 +} __no_const;
72802
72803 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72804
72805 @@ -2970,7 +2974,7 @@ enqueue:
72806
72807 local_irq_restore(flags);
72808
72809 - atomic_long_inc(&skb->dev->rx_dropped);
72810 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72811 kfree_skb(skb);
72812 return NET_RX_DROP;
72813 }
72814 @@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb)
72815 }
72816 EXPORT_SYMBOL(netif_rx_ni);
72817
72818 -static void net_tx_action(struct softirq_action *h)
72819 +static void net_tx_action(void)
72820 {
72821 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72822
72823 @@ -3333,7 +3337,7 @@ ncls:
72824 if (pt_prev) {
72825 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
72826 } else {
72827 - atomic_long_inc(&skb->dev->rx_dropped);
72828 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72829 kfree_skb(skb);
72830 /* Jamal, now you will not able to escape explaining
72831 * me how you were going to use this. :-)
72832 @@ -3897,7 +3901,7 @@ void netif_napi_del(struct napi_struct *napi)
72833 }
72834 EXPORT_SYMBOL(netif_napi_del);
72835
72836 -static void net_rx_action(struct softirq_action *h)
72837 +static void net_rx_action(void)
72838 {
72839 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72840 unsigned long time_limit = jiffies + 2;
72841 @@ -5955,7 +5959,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
72842 } else {
72843 netdev_stats_to_stats64(storage, &dev->stats);
72844 }
72845 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
72846 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
72847 return storage;
72848 }
72849 EXPORT_SYMBOL(dev_get_stats);
72850 diff --git a/net/core/flow.c b/net/core/flow.c
72851 index e318c7e..168b1d0 100644
72852 --- a/net/core/flow.c
72853 +++ b/net/core/flow.c
72854 @@ -61,7 +61,7 @@ struct flow_cache {
72855 struct timer_list rnd_timer;
72856 };
72857
72858 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
72859 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72860 EXPORT_SYMBOL(flow_cache_genid);
72861 static struct flow_cache flow_cache_global;
72862 static struct kmem_cache *flow_cachep __read_mostly;
72863 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
72864
72865 static int flow_entry_valid(struct flow_cache_entry *fle)
72866 {
72867 - if (atomic_read(&flow_cache_genid) != fle->genid)
72868 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72869 return 0;
72870 if (fle->object && !fle->object->ops->check(fle->object))
72871 return 0;
72872 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
72873 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72874 fcp->hash_count++;
72875 }
72876 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72877 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72878 flo = fle->object;
72879 if (!flo)
72880 goto ret_object;
72881 @@ -280,7 +280,7 @@ nocache:
72882 }
72883 flo = resolver(net, key, family, dir, flo, ctx);
72884 if (fle) {
72885 - fle->genid = atomic_read(&flow_cache_genid);
72886 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
72887 if (!IS_ERR(flo))
72888 fle->object = flo;
72889 else
72890 diff --git a/net/core/iovec.c b/net/core/iovec.c
72891 index c40f27e..7f49254 100644
72892 --- a/net/core/iovec.c
72893 +++ b/net/core/iovec.c
72894 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72895 if (m->msg_namelen) {
72896 if (mode == VERIFY_READ) {
72897 void __user *namep;
72898 - namep = (void __user __force *) m->msg_name;
72899 + namep = (void __force_user *) m->msg_name;
72900 err = move_addr_to_kernel(namep, m->msg_namelen,
72901 address);
72902 if (err < 0)
72903 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72904 }
72905
72906 size = m->msg_iovlen * sizeof(struct iovec);
72907 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72908 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72909 return -EFAULT;
72910
72911 m->msg_iov = iov;
72912 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
72913 index 9083e82..1673203 100644
72914 --- a/net/core/rtnetlink.c
72915 +++ b/net/core/rtnetlink.c
72916 @@ -57,7 +57,7 @@ struct rtnl_link {
72917 rtnl_doit_func doit;
72918 rtnl_dumpit_func dumpit;
72919 rtnl_calcit_func calcit;
72920 -};
72921 +} __no_const;
72922
72923 static DEFINE_MUTEX(rtnl_mutex);
72924 static u16 min_ifinfo_dump_size;
72925 diff --git a/net/core/scm.c b/net/core/scm.c
72926 index ff52ad0..aff1c0f 100644
72927 --- a/net/core/scm.c
72928 +++ b/net/core/scm.c
72929 @@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
72930 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72931 {
72932 struct cmsghdr __user *cm
72933 - = (__force struct cmsghdr __user *)msg->msg_control;
72934 + = (struct cmsghdr __force_user *)msg->msg_control;
72935 struct cmsghdr cmhdr;
72936 int cmlen = CMSG_LEN(len);
72937 int err;
72938 @@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72939 err = -EFAULT;
72940 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72941 goto out;
72942 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72943 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72944 goto out;
72945 cmlen = CMSG_SPACE(len);
72946 if (msg->msg_controllen < cmlen)
72947 @@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
72948 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72949 {
72950 struct cmsghdr __user *cm
72951 - = (__force struct cmsghdr __user*)msg->msg_control;
72952 + = (struct cmsghdr __force_user *)msg->msg_control;
72953
72954 int fdmax = 0;
72955 int fdnum = scm->fp->count;
72956 @@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72957 if (fdnum < fdmax)
72958 fdmax = fdnum;
72959
72960 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72961 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72962 i++, cmfptr++)
72963 {
72964 int new_fd;
72965 diff --git a/net/core/sock.c b/net/core/sock.c
72966 index b23f174..b9a0d26 100644
72967 --- a/net/core/sock.c
72968 +++ b/net/core/sock.c
72969 @@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72970 struct sk_buff_head *list = &sk->sk_receive_queue;
72971
72972 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
72973 - atomic_inc(&sk->sk_drops);
72974 + atomic_inc_unchecked(&sk->sk_drops);
72975 trace_sock_rcvqueue_full(sk, skb);
72976 return -ENOMEM;
72977 }
72978 @@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72979 return err;
72980
72981 if (!sk_rmem_schedule(sk, skb->truesize)) {
72982 - atomic_inc(&sk->sk_drops);
72983 + atomic_inc_unchecked(&sk->sk_drops);
72984 return -ENOBUFS;
72985 }
72986
72987 @@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72988 skb_dst_force(skb);
72989
72990 spin_lock_irqsave(&list->lock, flags);
72991 - skb->dropcount = atomic_read(&sk->sk_drops);
72992 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72993 __skb_queue_tail(list, skb);
72994 spin_unlock_irqrestore(&list->lock, flags);
72995
72996 @@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72997 skb->dev = NULL;
72998
72999 if (sk_rcvqueues_full(sk, skb)) {
73000 - atomic_inc(&sk->sk_drops);
73001 + atomic_inc_unchecked(&sk->sk_drops);
73002 goto discard_and_relse;
73003 }
73004 if (nested)
73005 @@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
73006 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
73007 } else if (sk_add_backlog(sk, skb)) {
73008 bh_unlock_sock(sk);
73009 - atomic_inc(&sk->sk_drops);
73010 + atomic_inc_unchecked(&sk->sk_drops);
73011 goto discard_and_relse;
73012 }
73013
73014 @@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
73015 if (len > sizeof(peercred))
73016 len = sizeof(peercred);
73017 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
73018 - if (copy_to_user(optval, &peercred, len))
73019 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
73020 return -EFAULT;
73021 goto lenout;
73022 }
73023 @@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
73024 return -ENOTCONN;
73025 if (lv < len)
73026 return -EINVAL;
73027 - if (copy_to_user(optval, address, len))
73028 + if (len > sizeof(address) || copy_to_user(optval, address, len))
73029 return -EFAULT;
73030 goto lenout;
73031 }
73032 @@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
73033
73034 if (len > lv)
73035 len = lv;
73036 - if (copy_to_user(optval, &v, len))
73037 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
73038 return -EFAULT;
73039 lenout:
73040 if (put_user(len, optlen))
73041 @@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
73042 */
73043 smp_wmb();
73044 atomic_set(&sk->sk_refcnt, 1);
73045 - atomic_set(&sk->sk_drops, 0);
73046 + atomic_set_unchecked(&sk->sk_drops, 0);
73047 }
73048 EXPORT_SYMBOL(sock_init_data);
73049
73050 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
73051 index 02e75d1..9a57a7c 100644
73052 --- a/net/decnet/sysctl_net_decnet.c
73053 +++ b/net/decnet/sysctl_net_decnet.c
73054 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
73055
73056 if (len > *lenp) len = *lenp;
73057
73058 - if (copy_to_user(buffer, addr, len))
73059 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
73060 return -EFAULT;
73061
73062 *lenp = len;
73063 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
73064
73065 if (len > *lenp) len = *lenp;
73066
73067 - if (copy_to_user(buffer, devname, len))
73068 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
73069 return -EFAULT;
73070
73071 *lenp = len;
73072 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
73073 index 39a2d29..f39c0fe 100644
73074 --- a/net/econet/Kconfig
73075 +++ b/net/econet/Kconfig
73076 @@ -4,7 +4,7 @@
73077
73078 config ECONET
73079 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
73080 - depends on EXPERIMENTAL && INET
73081 + depends on EXPERIMENTAL && INET && BROKEN
73082 ---help---
73083 Econet is a fairly old and slow networking protocol mainly used by
73084 Acorn computers to access file and print servers. It uses native
73085 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
73086 index 92fc5f6..b790d91 100644
73087 --- a/net/ipv4/fib_frontend.c
73088 +++ b/net/ipv4/fib_frontend.c
73089 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
73090 #ifdef CONFIG_IP_ROUTE_MULTIPATH
73091 fib_sync_up(dev);
73092 #endif
73093 - atomic_inc(&net->ipv4.dev_addr_genid);
73094 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73095 rt_cache_flush(dev_net(dev), -1);
73096 break;
73097 case NETDEV_DOWN:
73098 fib_del_ifaddr(ifa, NULL);
73099 - atomic_inc(&net->ipv4.dev_addr_genid);
73100 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73101 if (ifa->ifa_dev->ifa_list == NULL) {
73102 /* Last address was deleted from this interface.
73103 * Disable IP.
73104 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
73105 #ifdef CONFIG_IP_ROUTE_MULTIPATH
73106 fib_sync_up(dev);
73107 #endif
73108 - atomic_inc(&net->ipv4.dev_addr_genid);
73109 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73110 rt_cache_flush(dev_net(dev), -1);
73111 break;
73112 case NETDEV_DOWN:
73113 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
73114 index 80106d8..232e898 100644
73115 --- a/net/ipv4/fib_semantics.c
73116 +++ b/net/ipv4/fib_semantics.c
73117 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
73118 nh->nh_saddr = inet_select_addr(nh->nh_dev,
73119 nh->nh_gw,
73120 nh->nh_parent->fib_scope);
73121 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
73122 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
73123
73124 return nh->nh_saddr;
73125 }
73126 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
73127 index ccee270..db23c3c 100644
73128 --- a/net/ipv4/inet_diag.c
73129 +++ b/net/ipv4/inet_diag.c
73130 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
73131 r->idiag_retrans = 0;
73132
73133 r->id.idiag_if = sk->sk_bound_dev_if;
73134 +
73135 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73136 + r->id.idiag_cookie[0] = 0;
73137 + r->id.idiag_cookie[1] = 0;
73138 +#else
73139 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
73140 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
73141 +#endif
73142
73143 r->id.idiag_sport = inet->inet_sport;
73144 r->id.idiag_dport = inet->inet_dport;
73145 @@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
73146 r->idiag_family = tw->tw_family;
73147 r->idiag_retrans = 0;
73148 r->id.idiag_if = tw->tw_bound_dev_if;
73149 +
73150 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73151 + r->id.idiag_cookie[0] = 0;
73152 + r->id.idiag_cookie[1] = 0;
73153 +#else
73154 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
73155 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
73156 +#endif
73157 +
73158 r->id.idiag_sport = tw->tw_sport;
73159 r->id.idiag_dport = tw->tw_dport;
73160 r->id.idiag_src[0] = tw->tw_rcv_saddr;
73161 @@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
73162 if (sk == NULL)
73163 goto unlock;
73164
73165 +#ifndef CONFIG_GRKERNSEC_HIDESYM
73166 err = -ESTALE;
73167 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
73168 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
73169 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
73170 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
73171 goto out;
73172 +#endif
73173
73174 err = -ENOMEM;
73175 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
73176 @@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
73177 r->idiag_retrans = req->retrans;
73178
73179 r->id.idiag_if = sk->sk_bound_dev_if;
73180 +
73181 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73182 + r->id.idiag_cookie[0] = 0;
73183 + r->id.idiag_cookie[1] = 0;
73184 +#else
73185 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
73186 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
73187 +#endif
73188
73189 tmo = req->expires - jiffies;
73190 if (tmo < 0)
73191 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
73192 index 984ec65..97ac518 100644
73193 --- a/net/ipv4/inet_hashtables.c
73194 +++ b/net/ipv4/inet_hashtables.c
73195 @@ -18,12 +18,15 @@
73196 #include <linux/sched.h>
73197 #include <linux/slab.h>
73198 #include <linux/wait.h>
73199 +#include <linux/security.h>
73200
73201 #include <net/inet_connection_sock.h>
73202 #include <net/inet_hashtables.h>
73203 #include <net/secure_seq.h>
73204 #include <net/ip.h>
73205
73206 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
73207 +
73208 /*
73209 * Allocate and initialize a new local port bind bucket.
73210 * The bindhash mutex for snum's hash chain must be held here.
73211 @@ -530,6 +533,8 @@ ok:
73212 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
73213 spin_unlock(&head->lock);
73214
73215 + gr_update_task_in_ip_table(current, inet_sk(sk));
73216 +
73217 if (tw) {
73218 inet_twsk_deschedule(tw, death_row);
73219 while (twrefcnt) {
73220 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
73221 index 86f13c67..59a35b5 100644
73222 --- a/net/ipv4/inetpeer.c
73223 +++ b/net/ipv4/inetpeer.c
73224 @@ -436,8 +436,8 @@ relookup:
73225 if (p) {
73226 p->daddr = *daddr;
73227 atomic_set(&p->refcnt, 1);
73228 - atomic_set(&p->rid, 0);
73229 - atomic_set(&p->ip_id_count,
73230 + atomic_set_unchecked(&p->rid, 0);
73231 + atomic_set_unchecked(&p->ip_id_count,
73232 (daddr->family == AF_INET) ?
73233 secure_ip_id(daddr->addr.a4) :
73234 secure_ipv6_id(daddr->addr.a6));
73235 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
73236 index fdaabf2..0ec3205 100644
73237 --- a/net/ipv4/ip_fragment.c
73238 +++ b/net/ipv4/ip_fragment.c
73239 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
73240 return 0;
73241
73242 start = qp->rid;
73243 - end = atomic_inc_return(&peer->rid);
73244 + end = atomic_inc_return_unchecked(&peer->rid);
73245 qp->rid = end;
73246
73247 rc = qp->q.fragments && (end - start) > max;
73248 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
73249 index 09ff51b..d3968eb 100644
73250 --- a/net/ipv4/ip_sockglue.c
73251 +++ b/net/ipv4/ip_sockglue.c
73252 @@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
73253 len = min_t(unsigned int, len, opt->optlen);
73254 if (put_user(len, optlen))
73255 return -EFAULT;
73256 - if (copy_to_user(optval, opt->__data, len))
73257 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
73258 + copy_to_user(optval, opt->__data, len))
73259 return -EFAULT;
73260 return 0;
73261 }
73262 @@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
73263 if (sk->sk_type != SOCK_STREAM)
73264 return -ENOPROTOOPT;
73265
73266 - msg.msg_control = optval;
73267 + msg.msg_control = (void __force_kernel *)optval;
73268 msg.msg_controllen = len;
73269 msg.msg_flags = flags;
73270
73271 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
73272 index 99ec116..c5628fe 100644
73273 --- a/net/ipv4/ipconfig.c
73274 +++ b/net/ipv4/ipconfig.c
73275 @@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
73276
73277 mm_segment_t oldfs = get_fs();
73278 set_fs(get_ds());
73279 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
73280 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
73281 set_fs(oldfs);
73282 return res;
73283 }
73284 @@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
73285
73286 mm_segment_t oldfs = get_fs();
73287 set_fs(get_ds());
73288 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
73289 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
73290 set_fs(oldfs);
73291 return res;
73292 }
73293 @@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
73294
73295 mm_segment_t oldfs = get_fs();
73296 set_fs(get_ds());
73297 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
73298 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
73299 set_fs(oldfs);
73300 return res;
73301 }
73302 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
73303 index 2133c30..5c4b40b 100644
73304 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
73305 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
73306 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
73307
73308 *len = 0;
73309
73310 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
73311 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
73312 if (*octets == NULL)
73313 return 0;
73314
73315 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
73316 index 43d4c3b..1914409 100644
73317 --- a/net/ipv4/ping.c
73318 +++ b/net/ipv4/ping.c
73319 @@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
73320 sk_rmem_alloc_get(sp),
73321 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73322 atomic_read(&sp->sk_refcnt), sp,
73323 - atomic_read(&sp->sk_drops), len);
73324 + atomic_read_unchecked(&sp->sk_drops), len);
73325 }
73326
73327 static int ping_seq_show(struct seq_file *seq, void *v)
73328 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
73329 index 007e2eb..85a18a0 100644
73330 --- a/net/ipv4/raw.c
73331 +++ b/net/ipv4/raw.c
73332 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
73333 int raw_rcv(struct sock *sk, struct sk_buff *skb)
73334 {
73335 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
73336 - atomic_inc(&sk->sk_drops);
73337 + atomic_inc_unchecked(&sk->sk_drops);
73338 kfree_skb(skb);
73339 return NET_RX_DROP;
73340 }
73341 @@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
73342
73343 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
73344 {
73345 + struct icmp_filter filter;
73346 +
73347 if (optlen > sizeof(struct icmp_filter))
73348 optlen = sizeof(struct icmp_filter);
73349 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
73350 + if (copy_from_user(&filter, optval, optlen))
73351 return -EFAULT;
73352 + raw_sk(sk)->filter = filter;
73353 return 0;
73354 }
73355
73356 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
73357 {
73358 int len, ret = -EFAULT;
73359 + struct icmp_filter filter;
73360
73361 if (get_user(len, optlen))
73362 goto out;
73363 @@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
73364 if (len > sizeof(struct icmp_filter))
73365 len = sizeof(struct icmp_filter);
73366 ret = -EFAULT;
73367 - if (put_user(len, optlen) ||
73368 - copy_to_user(optval, &raw_sk(sk)->filter, len))
73369 + filter = raw_sk(sk)->filter;
73370 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
73371 goto out;
73372 ret = 0;
73373 out: return ret;
73374 @@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73375 sk_wmem_alloc_get(sp),
73376 sk_rmem_alloc_get(sp),
73377 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73378 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73379 + atomic_read(&sp->sk_refcnt),
73380 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73381 + NULL,
73382 +#else
73383 + sp,
73384 +#endif
73385 + atomic_read_unchecked(&sp->sk_drops));
73386 }
73387
73388 static int raw_seq_show(struct seq_file *seq, void *v)
73389 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
73390 index 94cdbc5..0cb0063 100644
73391 --- a/net/ipv4/route.c
73392 +++ b/net/ipv4/route.c
73393 @@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
73394
73395 static inline int rt_genid(struct net *net)
73396 {
73397 - return atomic_read(&net->ipv4.rt_genid);
73398 + return atomic_read_unchecked(&net->ipv4.rt_genid);
73399 }
73400
73401 #ifdef CONFIG_PROC_FS
73402 @@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
73403 unsigned char shuffle;
73404
73405 get_random_bytes(&shuffle, sizeof(shuffle));
73406 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
73407 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
73408 redirect_genid++;
73409 }
73410
73411 @@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net,
73412 error = rt->dst.error;
73413 if (peer) {
73414 inet_peer_refcheck(rt->peer);
73415 - id = atomic_read(&peer->ip_id_count) & 0xffff;
73416 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
73417 if (peer->tcp_ts_stamp) {
73418 ts = peer->tcp_ts;
73419 tsage = get_seconds() - peer->tcp_ts_stamp;
73420 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
73421 index eb90aa8..22bf114 100644
73422 --- a/net/ipv4/tcp_ipv4.c
73423 +++ b/net/ipv4/tcp_ipv4.c
73424 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
73425 int sysctl_tcp_low_latency __read_mostly;
73426 EXPORT_SYMBOL(sysctl_tcp_low_latency);
73427
73428 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73429 +extern int grsec_enable_blackhole;
73430 +#endif
73431
73432 #ifdef CONFIG_TCP_MD5SIG
73433 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
73434 @@ -1632,6 +1635,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
73435 return 0;
73436
73437 reset:
73438 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73439 + if (!grsec_enable_blackhole)
73440 +#endif
73441 tcp_v4_send_reset(rsk, skb);
73442 discard:
73443 kfree_skb(skb);
73444 @@ -1694,12 +1700,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
73445 TCP_SKB_CB(skb)->sacked = 0;
73446
73447 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73448 - if (!sk)
73449 + if (!sk) {
73450 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73451 + ret = 1;
73452 +#endif
73453 goto no_tcp_socket;
73454 -
73455 + }
73456 process:
73457 - if (sk->sk_state == TCP_TIME_WAIT)
73458 + if (sk->sk_state == TCP_TIME_WAIT) {
73459 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73460 + ret = 2;
73461 +#endif
73462 goto do_time_wait;
73463 + }
73464
73465 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
73466 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73467 @@ -1749,6 +1762,10 @@ no_tcp_socket:
73468 bad_packet:
73469 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73470 } else {
73471 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73472 + if (!grsec_enable_blackhole || (ret == 1 &&
73473 + (skb->dev->flags & IFF_LOOPBACK)))
73474 +#endif
73475 tcp_v4_send_reset(NULL, skb);
73476 }
73477
73478 @@ -2409,7 +2426,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
73479 0, /* non standard timer */
73480 0, /* open_requests have no inode */
73481 atomic_read(&sk->sk_refcnt),
73482 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73483 + NULL,
73484 +#else
73485 req,
73486 +#endif
73487 len);
73488 }
73489
73490 @@ -2459,7 +2480,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
73491 sock_i_uid(sk),
73492 icsk->icsk_probes_out,
73493 sock_i_ino(sk),
73494 - atomic_read(&sk->sk_refcnt), sk,
73495 + atomic_read(&sk->sk_refcnt),
73496 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73497 + NULL,
73498 +#else
73499 + sk,
73500 +#endif
73501 jiffies_to_clock_t(icsk->icsk_rto),
73502 jiffies_to_clock_t(icsk->icsk_ack.ato),
73503 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
73504 @@ -2487,7 +2513,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
73505 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
73506 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
73507 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73508 - atomic_read(&tw->tw_refcnt), tw, len);
73509 + atomic_read(&tw->tw_refcnt),
73510 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73511 + NULL,
73512 +#else
73513 + tw,
73514 +#endif
73515 + len);
73516 }
73517
73518 #define TMPSZ 150
73519 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
73520 index 66363b6..b0654a3 100644
73521 --- a/net/ipv4/tcp_minisocks.c
73522 +++ b/net/ipv4/tcp_minisocks.c
73523 @@ -27,6 +27,10 @@
73524 #include <net/inet_common.h>
73525 #include <net/xfrm.h>
73526
73527 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73528 +extern int grsec_enable_blackhole;
73529 +#endif
73530 +
73531 int sysctl_tcp_syncookies __read_mostly = 1;
73532 EXPORT_SYMBOL(sysctl_tcp_syncookies);
73533
73534 @@ -751,6 +755,10 @@ listen_overflow:
73535
73536 embryonic_reset:
73537 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
73538 +
73539 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73540 + if (!grsec_enable_blackhole)
73541 +#endif
73542 if (!(flg & TCP_FLAG_RST))
73543 req->rsk_ops->send_reset(sk, skb);
73544
73545 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
73546 index 85ee7eb..53277ab 100644
73547 --- a/net/ipv4/tcp_probe.c
73548 +++ b/net/ipv4/tcp_probe.c
73549 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
73550 if (cnt + width >= len)
73551 break;
73552
73553 - if (copy_to_user(buf + cnt, tbuf, width))
73554 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
73555 return -EFAULT;
73556 cnt += width;
73557 }
73558 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
73559 index 2e0f0af..e2948bf 100644
73560 --- a/net/ipv4/tcp_timer.c
73561 +++ b/net/ipv4/tcp_timer.c
73562 @@ -22,6 +22,10 @@
73563 #include <linux/gfp.h>
73564 #include <net/tcp.h>
73565
73566 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73567 +extern int grsec_lastack_retries;
73568 +#endif
73569 +
73570 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
73571 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
73572 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
73573 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
73574 }
73575 }
73576
73577 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73578 + if ((sk->sk_state == TCP_LAST_ACK) &&
73579 + (grsec_lastack_retries > 0) &&
73580 + (grsec_lastack_retries < retry_until))
73581 + retry_until = grsec_lastack_retries;
73582 +#endif
73583 +
73584 if (retransmits_timed_out(sk, retry_until,
73585 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
73586 /* Has it gone just too far? */
73587 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
73588 index 5a65eea..bd913a1 100644
73589 --- a/net/ipv4/udp.c
73590 +++ b/net/ipv4/udp.c
73591 @@ -86,6 +86,7 @@
73592 #include <linux/types.h>
73593 #include <linux/fcntl.h>
73594 #include <linux/module.h>
73595 +#include <linux/security.h>
73596 #include <linux/socket.h>
73597 #include <linux/sockios.h>
73598 #include <linux/igmp.h>
73599 @@ -108,6 +109,10 @@
73600 #include <trace/events/udp.h>
73601 #include "udp_impl.h"
73602
73603 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73604 +extern int grsec_enable_blackhole;
73605 +#endif
73606 +
73607 struct udp_table udp_table __read_mostly;
73608 EXPORT_SYMBOL(udp_table);
73609
73610 @@ -565,6 +570,9 @@ found:
73611 return s;
73612 }
73613
73614 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
73615 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
73616 +
73617 /*
73618 * This routine is called by the ICMP module when it gets some
73619 * sort of error condition. If err < 0 then the socket should
73620 @@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
73621 dport = usin->sin_port;
73622 if (dport == 0)
73623 return -EINVAL;
73624 +
73625 + err = gr_search_udp_sendmsg(sk, usin);
73626 + if (err)
73627 + return err;
73628 } else {
73629 if (sk->sk_state != TCP_ESTABLISHED)
73630 return -EDESTADDRREQ;
73631 +
73632 + err = gr_search_udp_sendmsg(sk, NULL);
73633 + if (err)
73634 + return err;
73635 +
73636 daddr = inet->inet_daddr;
73637 dport = inet->inet_dport;
73638 /* Open fast path for connected socket.
73639 @@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
73640 udp_lib_checksum_complete(skb)) {
73641 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73642 IS_UDPLITE(sk));
73643 - atomic_inc(&sk->sk_drops);
73644 + atomic_inc_unchecked(&sk->sk_drops);
73645 __skb_unlink(skb, rcvq);
73646 __skb_queue_tail(&list_kill, skb);
73647 }
73648 @@ -1185,6 +1202,10 @@ try_again:
73649 if (!skb)
73650 goto out;
73651
73652 + err = gr_search_udp_recvmsg(sk, skb);
73653 + if (err)
73654 + goto out_free;
73655 +
73656 ulen = skb->len - sizeof(struct udphdr);
73657 copied = len;
73658 if (copied > ulen)
73659 @@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73660
73661 drop:
73662 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73663 - atomic_inc(&sk->sk_drops);
73664 + atomic_inc_unchecked(&sk->sk_drops);
73665 kfree_skb(skb);
73666 return -1;
73667 }
73668 @@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73669 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
73670
73671 if (!skb1) {
73672 - atomic_inc(&sk->sk_drops);
73673 + atomic_inc_unchecked(&sk->sk_drops);
73674 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
73675 IS_UDPLITE(sk));
73676 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73677 @@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73678 goto csum_error;
73679
73680 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
73681 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73682 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73683 +#endif
73684 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
73685
73686 /*
73687 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
73688 sk_wmem_alloc_get(sp),
73689 sk_rmem_alloc_get(sp),
73690 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73691 - atomic_read(&sp->sk_refcnt), sp,
73692 - atomic_read(&sp->sk_drops), len);
73693 + atomic_read(&sp->sk_refcnt),
73694 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73695 + NULL,
73696 +#else
73697 + sp,
73698 +#endif
73699 + atomic_read_unchecked(&sp->sk_drops), len);
73700 }
73701
73702 int udp4_seq_show(struct seq_file *seq, void *v)
73703 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
73704 index 836c4ea..cbb74dc 100644
73705 --- a/net/ipv6/addrconf.c
73706 +++ b/net/ipv6/addrconf.c
73707 @@ -2149,7 +2149,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
73708 p.iph.ihl = 5;
73709 p.iph.protocol = IPPROTO_IPV6;
73710 p.iph.ttl = 64;
73711 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73712 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73713
73714 if (ops->ndo_do_ioctl) {
73715 mm_segment_t oldfs = get_fs();
73716 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
73717 index 1567fb1..29af910 100644
73718 --- a/net/ipv6/inet6_connection_sock.c
73719 +++ b/net/ipv6/inet6_connection_sock.c
73720 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
73721 #ifdef CONFIG_XFRM
73722 {
73723 struct rt6_info *rt = (struct rt6_info *)dst;
73724 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73725 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73726 }
73727 #endif
73728 }
73729 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
73730 #ifdef CONFIG_XFRM
73731 if (dst) {
73732 struct rt6_info *rt = (struct rt6_info *)dst;
73733 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73734 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73735 __sk_dst_reset(sk);
73736 dst = NULL;
73737 }
73738 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
73739 index 26cb08c..8af9877 100644
73740 --- a/net/ipv6/ipv6_sockglue.c
73741 +++ b/net/ipv6/ipv6_sockglue.c
73742 @@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
73743 if (sk->sk_type != SOCK_STREAM)
73744 return -ENOPROTOOPT;
73745
73746 - msg.msg_control = optval;
73747 + msg.msg_control = (void __force_kernel *)optval;
73748 msg.msg_controllen = len;
73749 msg.msg_flags = flags;
73750
73751 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
73752 index 361ebf3..d5628fb 100644
73753 --- a/net/ipv6/raw.c
73754 +++ b/net/ipv6/raw.c
73755 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
73756 {
73757 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
73758 skb_checksum_complete(skb)) {
73759 - atomic_inc(&sk->sk_drops);
73760 + atomic_inc_unchecked(&sk->sk_drops);
73761 kfree_skb(skb);
73762 return NET_RX_DROP;
73763 }
73764 @@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73765 struct raw6_sock *rp = raw6_sk(sk);
73766
73767 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73768 - atomic_inc(&sk->sk_drops);
73769 + atomic_inc_unchecked(&sk->sk_drops);
73770 kfree_skb(skb);
73771 return NET_RX_DROP;
73772 }
73773 @@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73774
73775 if (inet->hdrincl) {
73776 if (skb_checksum_complete(skb)) {
73777 - atomic_inc(&sk->sk_drops);
73778 + atomic_inc_unchecked(&sk->sk_drops);
73779 kfree_skb(skb);
73780 return NET_RX_DROP;
73781 }
73782 @@ -601,7 +601,7 @@ out:
73783 return err;
73784 }
73785
73786 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73787 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73788 struct flowi6 *fl6, struct dst_entry **dstp,
73789 unsigned int flags)
73790 {
73791 @@ -909,12 +909,15 @@ do_confirm:
73792 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73793 char __user *optval, int optlen)
73794 {
73795 + struct icmp6_filter filter;
73796 +
73797 switch (optname) {
73798 case ICMPV6_FILTER:
73799 if (optlen > sizeof(struct icmp6_filter))
73800 optlen = sizeof(struct icmp6_filter);
73801 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73802 + if (copy_from_user(&filter, optval, optlen))
73803 return -EFAULT;
73804 + raw6_sk(sk)->filter = filter;
73805 return 0;
73806 default:
73807 return -ENOPROTOOPT;
73808 @@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73809 char __user *optval, int __user *optlen)
73810 {
73811 int len;
73812 + struct icmp6_filter filter;
73813
73814 switch (optname) {
73815 case ICMPV6_FILTER:
73816 @@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73817 len = sizeof(struct icmp6_filter);
73818 if (put_user(len, optlen))
73819 return -EFAULT;
73820 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73821 + filter = raw6_sk(sk)->filter;
73822 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
73823 return -EFAULT;
73824 return 0;
73825 default:
73826 @@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73827 0, 0L, 0,
73828 sock_i_uid(sp), 0,
73829 sock_i_ino(sp),
73830 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73831 + atomic_read(&sp->sk_refcnt),
73832 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73833 + NULL,
73834 +#else
73835 + sp,
73836 +#endif
73837 + atomic_read_unchecked(&sp->sk_drops));
73838 }
73839
73840 static int raw6_seq_show(struct seq_file *seq, void *v)
73841 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
73842 index b859e4a..f9d1589 100644
73843 --- a/net/ipv6/tcp_ipv6.c
73844 +++ b/net/ipv6/tcp_ipv6.c
73845 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
73846 }
73847 #endif
73848
73849 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73850 +extern int grsec_enable_blackhole;
73851 +#endif
73852 +
73853 static void tcp_v6_hash(struct sock *sk)
73854 {
73855 if (sk->sk_state != TCP_CLOSE) {
73856 @@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
73857 return 0;
73858
73859 reset:
73860 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73861 + if (!grsec_enable_blackhole)
73862 +#endif
73863 tcp_v6_send_reset(sk, skb);
73864 discard:
73865 if (opt_skb)
73866 @@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
73867 TCP_SKB_CB(skb)->sacked = 0;
73868
73869 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73870 - if (!sk)
73871 + if (!sk) {
73872 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73873 + ret = 1;
73874 +#endif
73875 goto no_tcp_socket;
73876 + }
73877
73878 process:
73879 - if (sk->sk_state == TCP_TIME_WAIT)
73880 + if (sk->sk_state == TCP_TIME_WAIT) {
73881 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73882 + ret = 2;
73883 +#endif
73884 goto do_time_wait;
73885 + }
73886
73887 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73888 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73889 @@ -1783,6 +1798,10 @@ no_tcp_socket:
73890 bad_packet:
73891 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73892 } else {
73893 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73894 + if (!grsec_enable_blackhole || (ret == 1 &&
73895 + (skb->dev->flags & IFF_LOOPBACK)))
73896 +#endif
73897 tcp_v6_send_reset(NULL, skb);
73898 }
73899
73900 @@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
73901 uid,
73902 0, /* non standard timer */
73903 0, /* open_requests have no inode */
73904 - 0, req);
73905 + 0,
73906 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73907 + NULL
73908 +#else
73909 + req
73910 +#endif
73911 + );
73912 }
73913
73914 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73915 @@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73916 sock_i_uid(sp),
73917 icsk->icsk_probes_out,
73918 sock_i_ino(sp),
73919 - atomic_read(&sp->sk_refcnt), sp,
73920 + atomic_read(&sp->sk_refcnt),
73921 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73922 + NULL,
73923 +#else
73924 + sp,
73925 +#endif
73926 jiffies_to_clock_t(icsk->icsk_rto),
73927 jiffies_to_clock_t(icsk->icsk_ack.ato),
73928 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73929 @@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
73930 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73931 tw->tw_substate, 0, 0,
73932 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73933 - atomic_read(&tw->tw_refcnt), tw);
73934 + atomic_read(&tw->tw_refcnt),
73935 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73936 + NULL
73937 +#else
73938 + tw
73939 +#endif
73940 + );
73941 }
73942
73943 static int tcp6_seq_show(struct seq_file *seq, void *v)
73944 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
73945 index 8c25419..47a51ae 100644
73946 --- a/net/ipv6/udp.c
73947 +++ b/net/ipv6/udp.c
73948 @@ -50,6 +50,10 @@
73949 #include <linux/seq_file.h>
73950 #include "udp_impl.h"
73951
73952 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73953 +extern int grsec_enable_blackhole;
73954 +#endif
73955 +
73956 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73957 {
73958 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73959 @@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
73960
73961 return 0;
73962 drop:
73963 - atomic_inc(&sk->sk_drops);
73964 + atomic_inc_unchecked(&sk->sk_drops);
73965 drop_no_sk_drops_inc:
73966 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73967 kfree_skb(skb);
73968 @@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73969 continue;
73970 }
73971 drop:
73972 - atomic_inc(&sk->sk_drops);
73973 + atomic_inc_unchecked(&sk->sk_drops);
73974 UDP6_INC_STATS_BH(sock_net(sk),
73975 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73976 UDP6_INC_STATS_BH(sock_net(sk),
73977 @@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73978 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73979 proto == IPPROTO_UDPLITE);
73980
73981 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73982 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73983 +#endif
73984 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73985
73986 kfree_skb(skb);
73987 @@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73988 if (!sock_owned_by_user(sk))
73989 udpv6_queue_rcv_skb(sk, skb);
73990 else if (sk_add_backlog(sk, skb)) {
73991 - atomic_inc(&sk->sk_drops);
73992 + atomic_inc_unchecked(&sk->sk_drops);
73993 bh_unlock_sock(sk);
73994 sock_put(sk);
73995 goto discard;
73996 @@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
73997 0, 0L, 0,
73998 sock_i_uid(sp), 0,
73999 sock_i_ino(sp),
74000 - atomic_read(&sp->sk_refcnt), sp,
74001 - atomic_read(&sp->sk_drops));
74002 + atomic_read(&sp->sk_refcnt),
74003 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74004 + NULL,
74005 +#else
74006 + sp,
74007 +#endif
74008 + atomic_read_unchecked(&sp->sk_drops));
74009 }
74010
74011 int udp6_seq_show(struct seq_file *seq, void *v)
74012 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
74013 index 253695d..9481ce8 100644
74014 --- a/net/irda/ircomm/ircomm_tty.c
74015 +++ b/net/irda/ircomm/ircomm_tty.c
74016 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
74017 add_wait_queue(&self->open_wait, &wait);
74018
74019 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
74020 - __FILE__,__LINE__, tty->driver->name, self->open_count );
74021 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
74022
74023 /* As far as I can see, we protect open_count - Jean II */
74024 spin_lock_irqsave(&self->spinlock, flags);
74025 if (!tty_hung_up_p(filp)) {
74026 extra_count = 1;
74027 - self->open_count--;
74028 + local_dec(&self->open_count);
74029 }
74030 spin_unlock_irqrestore(&self->spinlock, flags);
74031 - self->blocked_open++;
74032 + local_inc(&self->blocked_open);
74033
74034 while (1) {
74035 if (tty->termios->c_cflag & CBAUD) {
74036 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
74037 }
74038
74039 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
74040 - __FILE__,__LINE__, tty->driver->name, self->open_count );
74041 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
74042
74043 schedule();
74044 }
74045 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
74046 if (extra_count) {
74047 /* ++ is not atomic, so this should be protected - Jean II */
74048 spin_lock_irqsave(&self->spinlock, flags);
74049 - self->open_count++;
74050 + local_inc(&self->open_count);
74051 spin_unlock_irqrestore(&self->spinlock, flags);
74052 }
74053 - self->blocked_open--;
74054 + local_dec(&self->blocked_open);
74055
74056 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
74057 - __FILE__,__LINE__, tty->driver->name, self->open_count);
74058 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
74059
74060 if (!retval)
74061 self->flags |= ASYNC_NORMAL_ACTIVE;
74062 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
74063 }
74064 /* ++ is not atomic, so this should be protected - Jean II */
74065 spin_lock_irqsave(&self->spinlock, flags);
74066 - self->open_count++;
74067 + local_inc(&self->open_count);
74068
74069 tty->driver_data = self;
74070 self->tty = tty;
74071 spin_unlock_irqrestore(&self->spinlock, flags);
74072
74073 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
74074 - self->line, self->open_count);
74075 + self->line, local_read(&self->open_count));
74076
74077 /* Not really used by us, but lets do it anyway */
74078 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
74079 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
74080 return;
74081 }
74082
74083 - if ((tty->count == 1) && (self->open_count != 1)) {
74084 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
74085 /*
74086 * Uh, oh. tty->count is 1, which means that the tty
74087 * structure will be freed. state->count should always
74088 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
74089 */
74090 IRDA_DEBUG(0, "%s(), bad serial port count; "
74091 "tty->count is 1, state->count is %d\n", __func__ ,
74092 - self->open_count);
74093 - self->open_count = 1;
74094 + local_read(&self->open_count));
74095 + local_set(&self->open_count, 1);
74096 }
74097
74098 - if (--self->open_count < 0) {
74099 + if (local_dec_return(&self->open_count) < 0) {
74100 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
74101 - __func__, self->line, self->open_count);
74102 - self->open_count = 0;
74103 + __func__, self->line, local_read(&self->open_count));
74104 + local_set(&self->open_count, 0);
74105 }
74106 - if (self->open_count) {
74107 + if (local_read(&self->open_count)) {
74108 spin_unlock_irqrestore(&self->spinlock, flags);
74109
74110 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
74111 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
74112 tty->closing = 0;
74113 self->tty = NULL;
74114
74115 - if (self->blocked_open) {
74116 + if (local_read(&self->blocked_open)) {
74117 if (self->close_delay)
74118 schedule_timeout_interruptible(self->close_delay);
74119 wake_up_interruptible(&self->open_wait);
74120 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
74121 spin_lock_irqsave(&self->spinlock, flags);
74122 self->flags &= ~ASYNC_NORMAL_ACTIVE;
74123 self->tty = NULL;
74124 - self->open_count = 0;
74125 + local_set(&self->open_count, 0);
74126 spin_unlock_irqrestore(&self->spinlock, flags);
74127
74128 wake_up_interruptible(&self->open_wait);
74129 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
74130 seq_putc(m, '\n');
74131
74132 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
74133 - seq_printf(m, "Open count: %d\n", self->open_count);
74134 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
74135 seq_printf(m, "Max data size: %d\n", self->max_data_size);
74136 seq_printf(m, "Max header size: %d\n", self->max_header_size);
74137
74138 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
74139 index 274d150..656a144 100644
74140 --- a/net/iucv/af_iucv.c
74141 +++ b/net/iucv/af_iucv.c
74142 @@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
74143
74144 write_lock_bh(&iucv_sk_list.lock);
74145
74146 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
74147 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
74148 while (__iucv_get_sock_by_name(name)) {
74149 sprintf(name, "%08x",
74150 - atomic_inc_return(&iucv_sk_list.autobind_name));
74151 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
74152 }
74153
74154 write_unlock_bh(&iucv_sk_list.lock);
74155 diff --git a/net/key/af_key.c b/net/key/af_key.c
74156 index 1e733e9..3d73c9f 100644
74157 --- a/net/key/af_key.c
74158 +++ b/net/key/af_key.c
74159 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
74160 static u32 get_acqseq(void)
74161 {
74162 u32 res;
74163 - static atomic_t acqseq;
74164 + static atomic_unchecked_t acqseq;
74165
74166 do {
74167 - res = atomic_inc_return(&acqseq);
74168 + res = atomic_inc_return_unchecked(&acqseq);
74169 } while (!res);
74170 return res;
74171 }
74172 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
74173 index 73495f1..ad51356 100644
74174 --- a/net/mac80211/ieee80211_i.h
74175 +++ b/net/mac80211/ieee80211_i.h
74176 @@ -27,6 +27,7 @@
74177 #include <net/ieee80211_radiotap.h>
74178 #include <net/cfg80211.h>
74179 #include <net/mac80211.h>
74180 +#include <asm/local.h>
74181 #include "key.h"
74182 #include "sta_info.h"
74183
74184 @@ -764,7 +765,7 @@ struct ieee80211_local {
74185 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
74186 spinlock_t queue_stop_reason_lock;
74187
74188 - int open_count;
74189 + local_t open_count;
74190 int monitors, cooked_mntrs;
74191 /* number of interfaces with corresponding FIF_ flags */
74192 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
74193 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
74194 index 30d7355..e260095 100644
74195 --- a/net/mac80211/iface.c
74196 +++ b/net/mac80211/iface.c
74197 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74198 break;
74199 }
74200
74201 - if (local->open_count == 0) {
74202 + if (local_read(&local->open_count) == 0) {
74203 res = drv_start(local);
74204 if (res)
74205 goto err_del_bss;
74206 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74207 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
74208
74209 if (!is_valid_ether_addr(dev->dev_addr)) {
74210 - if (!local->open_count)
74211 + if (!local_read(&local->open_count))
74212 drv_stop(local);
74213 return -EADDRNOTAVAIL;
74214 }
74215 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74216 mutex_unlock(&local->mtx);
74217
74218 if (coming_up)
74219 - local->open_count++;
74220 + local_inc(&local->open_count);
74221
74222 if (hw_reconf_flags) {
74223 ieee80211_hw_config(local, hw_reconf_flags);
74224 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74225 err_del_interface:
74226 drv_remove_interface(local, &sdata->vif);
74227 err_stop:
74228 - if (!local->open_count)
74229 + if (!local_read(&local->open_count))
74230 drv_stop(local);
74231 err_del_bss:
74232 sdata->bss = NULL;
74233 @@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
74234 }
74235
74236 if (going_down)
74237 - local->open_count--;
74238 + local_dec(&local->open_count);
74239
74240 switch (sdata->vif.type) {
74241 case NL80211_IFTYPE_AP_VLAN:
74242 @@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
74243
74244 ieee80211_recalc_ps(local, -1);
74245
74246 - if (local->open_count == 0) {
74247 + if (local_read(&local->open_count) == 0) {
74248 if (local->ops->napi_poll)
74249 napi_disable(&local->napi);
74250 ieee80211_clear_tx_pending(local);
74251 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
74252 index 7d9b21d..0687004 100644
74253 --- a/net/mac80211/main.c
74254 +++ b/net/mac80211/main.c
74255 @@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
74256 local->hw.conf.power_level = power;
74257 }
74258
74259 - if (changed && local->open_count) {
74260 + if (changed && local_read(&local->open_count)) {
74261 ret = drv_config(local, changed);
74262 /*
74263 * Goal:
74264 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
74265 index 9ee7164..56c5061 100644
74266 --- a/net/mac80211/pm.c
74267 +++ b/net/mac80211/pm.c
74268 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
74269 struct ieee80211_sub_if_data *sdata;
74270 struct sta_info *sta;
74271
74272 - if (!local->open_count)
74273 + if (!local_read(&local->open_count))
74274 goto suspend;
74275
74276 ieee80211_scan_cancel(local);
74277 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
74278 cancel_work_sync(&local->dynamic_ps_enable_work);
74279 del_timer_sync(&local->dynamic_ps_timer);
74280
74281 - local->wowlan = wowlan && local->open_count;
74282 + local->wowlan = wowlan && local_read(&local->open_count);
74283 if (local->wowlan) {
74284 int err = drv_suspend(local, wowlan);
74285 if (err < 0) {
74286 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
74287 }
74288
74289 /* stop hardware - this must stop RX */
74290 - if (local->open_count)
74291 + if (local_read(&local->open_count))
74292 ieee80211_stop_device(local);
74293
74294 suspend:
74295 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
74296 index 5a5a776..9600b11 100644
74297 --- a/net/mac80211/rate.c
74298 +++ b/net/mac80211/rate.c
74299 @@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
74300
74301 ASSERT_RTNL();
74302
74303 - if (local->open_count)
74304 + if (local_read(&local->open_count))
74305 return -EBUSY;
74306
74307 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
74308 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
74309 index c97a065..ff61928 100644
74310 --- a/net/mac80211/rc80211_pid_debugfs.c
74311 +++ b/net/mac80211/rc80211_pid_debugfs.c
74312 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
74313
74314 spin_unlock_irqrestore(&events->lock, status);
74315
74316 - if (copy_to_user(buf, pb, p))
74317 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
74318 return -EFAULT;
74319
74320 return p;
74321 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
74322 index d5230ec..c604b21 100644
74323 --- a/net/mac80211/util.c
74324 +++ b/net/mac80211/util.c
74325 @@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
74326 drv_set_coverage_class(local, hw->wiphy->coverage_class);
74327
74328 /* everything else happens only if HW was up & running */
74329 - if (!local->open_count)
74330 + if (!local_read(&local->open_count))
74331 goto wake_up;
74332
74333 /*
74334 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
74335 index d5597b7..ab6d39c 100644
74336 --- a/net/netfilter/Kconfig
74337 +++ b/net/netfilter/Kconfig
74338 @@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
74339
74340 To compile it as a module, choose M here. If unsure, say N.
74341
74342 +config NETFILTER_XT_MATCH_GRADM
74343 + tristate '"gradm" match support'
74344 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
74345 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
74346 + ---help---
74347 + The gradm match allows to match on grsecurity RBAC being enabled.
74348 + It is useful when iptables rules are applied early on bootup to
74349 + prevent connections to the machine (except from a trusted host)
74350 + while the RBAC system is disabled.
74351 +
74352 config NETFILTER_XT_MATCH_HASHLIMIT
74353 tristate '"hashlimit" match support'
74354 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
74355 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
74356 index 1a02853..5d8c22e 100644
74357 --- a/net/netfilter/Makefile
74358 +++ b/net/netfilter/Makefile
74359 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
74360 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
74361 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
74362 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
74363 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
74364 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
74365 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
74366 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
74367 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
74368 index 29fa5ba..8debc79 100644
74369 --- a/net/netfilter/ipvs/ip_vs_conn.c
74370 +++ b/net/netfilter/ipvs/ip_vs_conn.c
74371 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
74372 /* Increase the refcnt counter of the dest */
74373 atomic_inc(&dest->refcnt);
74374
74375 - conn_flags = atomic_read(&dest->conn_flags);
74376 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
74377 if (cp->protocol != IPPROTO_UDP)
74378 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
74379 /* Bind with the destination and its corresponding transmitter */
74380 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
74381 atomic_set(&cp->refcnt, 1);
74382
74383 atomic_set(&cp->n_control, 0);
74384 - atomic_set(&cp->in_pkts, 0);
74385 + atomic_set_unchecked(&cp->in_pkts, 0);
74386
74387 atomic_inc(&ipvs->conn_count);
74388 if (flags & IP_VS_CONN_F_NO_CPORT)
74389 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
74390
74391 /* Don't drop the entry if its number of incoming packets is not
74392 located in [0, 8] */
74393 - i = atomic_read(&cp->in_pkts);
74394 + i = atomic_read_unchecked(&cp->in_pkts);
74395 if (i > 8 || i < 0) return 0;
74396
74397 if (!todrop_rate[i]) return 0;
74398 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
74399 index 6dc7d7d..e45913a 100644
74400 --- a/net/netfilter/ipvs/ip_vs_core.c
74401 +++ b/net/netfilter/ipvs/ip_vs_core.c
74402 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
74403 ret = cp->packet_xmit(skb, cp, pd->pp);
74404 /* do not touch skb anymore */
74405
74406 - atomic_inc(&cp->in_pkts);
74407 + atomic_inc_unchecked(&cp->in_pkts);
74408 ip_vs_conn_put(cp);
74409 return ret;
74410 }
74411 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
74412 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
74413 pkts = sysctl_sync_threshold(ipvs);
74414 else
74415 - pkts = atomic_add_return(1, &cp->in_pkts);
74416 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74417
74418 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
74419 cp->protocol == IPPROTO_SCTP) {
74420 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
74421 index e1a66cf..0910076 100644
74422 --- a/net/netfilter/ipvs/ip_vs_ctl.c
74423 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
74424 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
74425 ip_vs_rs_hash(ipvs, dest);
74426 write_unlock_bh(&ipvs->rs_lock);
74427 }
74428 - atomic_set(&dest->conn_flags, conn_flags);
74429 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
74430
74431 /* bind the service */
74432 if (!dest->svc) {
74433 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74434 " %-7s %-6d %-10d %-10d\n",
74435 &dest->addr.in6,
74436 ntohs(dest->port),
74437 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74438 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74439 atomic_read(&dest->weight),
74440 atomic_read(&dest->activeconns),
74441 atomic_read(&dest->inactconns));
74442 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74443 "%-7s %-6d %-10d %-10d\n",
74444 ntohl(dest->addr.ip),
74445 ntohs(dest->port),
74446 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74447 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74448 atomic_read(&dest->weight),
74449 atomic_read(&dest->activeconns),
74450 atomic_read(&dest->inactconns));
74451 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
74452
74453 entry.addr = dest->addr.ip;
74454 entry.port = dest->port;
74455 - entry.conn_flags = atomic_read(&dest->conn_flags);
74456 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
74457 entry.weight = atomic_read(&dest->weight);
74458 entry.u_threshold = dest->u_threshold;
74459 entry.l_threshold = dest->l_threshold;
74460 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
74461 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
74462
74463 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
74464 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74465 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74466 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
74467 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
74468 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
74469 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
74470 index 2b6678c0..aaa41fc 100644
74471 --- a/net/netfilter/ipvs/ip_vs_sync.c
74472 +++ b/net/netfilter/ipvs/ip_vs_sync.c
74473 @@ -649,7 +649,7 @@ control:
74474 * i.e only increment in_pkts for Templates.
74475 */
74476 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
74477 - int pkts = atomic_add_return(1, &cp->in_pkts);
74478 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74479
74480 if (pkts % sysctl_sync_period(ipvs) != 1)
74481 return;
74482 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
74483
74484 if (opt)
74485 memcpy(&cp->in_seq, opt, sizeof(*opt));
74486 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74487 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74488 cp->state = state;
74489 cp->old_state = cp->state;
74490 /*
74491 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
74492 index aa2d720..d8aa111 100644
74493 --- a/net/netfilter/ipvs/ip_vs_xmit.c
74494 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
74495 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
74496 else
74497 rc = NF_ACCEPT;
74498 /* do not touch skb anymore */
74499 - atomic_inc(&cp->in_pkts);
74500 + atomic_inc_unchecked(&cp->in_pkts);
74501 goto out;
74502 }
74503
74504 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
74505 else
74506 rc = NF_ACCEPT;
74507 /* do not touch skb anymore */
74508 - atomic_inc(&cp->in_pkts);
74509 + atomic_inc_unchecked(&cp->in_pkts);
74510 goto out;
74511 }
74512
74513 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
74514 index 66b2c54..c7884e3 100644
74515 --- a/net/netfilter/nfnetlink_log.c
74516 +++ b/net/netfilter/nfnetlink_log.c
74517 @@ -70,7 +70,7 @@ struct nfulnl_instance {
74518 };
74519
74520 static DEFINE_SPINLOCK(instances_lock);
74521 -static atomic_t global_seq;
74522 +static atomic_unchecked_t global_seq;
74523
74524 #define INSTANCE_BUCKETS 16
74525 static struct hlist_head instance_table[INSTANCE_BUCKETS];
74526 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
74527 /* global sequence number */
74528 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
74529 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
74530 - htonl(atomic_inc_return(&global_seq)));
74531 + htonl(atomic_inc_return_unchecked(&global_seq)));
74532
74533 if (data_len) {
74534 struct nlattr *nla;
74535 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
74536 new file mode 100644
74537 index 0000000..6905327
74538 --- /dev/null
74539 +++ b/net/netfilter/xt_gradm.c
74540 @@ -0,0 +1,51 @@
74541 +/*
74542 + * gradm match for netfilter
74543 + * Copyright © Zbigniew Krzystolik, 2010
74544 + *
74545 + * This program is free software; you can redistribute it and/or modify
74546 + * it under the terms of the GNU General Public License; either version
74547 + * 2 or 3 as published by the Free Software Foundation.
74548 + */
74549 +#include <linux/module.h>
74550 +#include <linux/moduleparam.h>
74551 +#include <linux/skbuff.h>
74552 +#include <linux/netfilter/x_tables.h>
74553 +#include <linux/grsecurity.h>
74554 +#include <linux/netfilter/xt_gradm.h>
74555 +
74556 +static bool
74557 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
74558 +{
74559 + const struct xt_gradm_mtinfo *info = par->matchinfo;
74560 + bool retval = false;
74561 + if (gr_acl_is_enabled())
74562 + retval = true;
74563 + return retval ^ info->invflags;
74564 +}
74565 +
74566 +static struct xt_match gradm_mt_reg __read_mostly = {
74567 + .name = "gradm",
74568 + .revision = 0,
74569 + .family = NFPROTO_UNSPEC,
74570 + .match = gradm_mt,
74571 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
74572 + .me = THIS_MODULE,
74573 +};
74574 +
74575 +static int __init gradm_mt_init(void)
74576 +{
74577 + return xt_register_match(&gradm_mt_reg);
74578 +}
74579 +
74580 +static void __exit gradm_mt_exit(void)
74581 +{
74582 + xt_unregister_match(&gradm_mt_reg);
74583 +}
74584 +
74585 +module_init(gradm_mt_init);
74586 +module_exit(gradm_mt_exit);
74587 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
74588 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
74589 +MODULE_LICENSE("GPL");
74590 +MODULE_ALIAS("ipt_gradm");
74591 +MODULE_ALIAS("ip6t_gradm");
74592 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
74593 index 4fe4fb4..87a89e5 100644
74594 --- a/net/netfilter/xt_statistic.c
74595 +++ b/net/netfilter/xt_statistic.c
74596 @@ -19,7 +19,7 @@
74597 #include <linux/module.h>
74598
74599 struct xt_statistic_priv {
74600 - atomic_t count;
74601 + atomic_unchecked_t count;
74602 } ____cacheline_aligned_in_smp;
74603
74604 MODULE_LICENSE("GPL");
74605 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
74606 break;
74607 case XT_STATISTIC_MODE_NTH:
74608 do {
74609 - oval = atomic_read(&info->master->count);
74610 + oval = atomic_read_unchecked(&info->master->count);
74611 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
74612 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
74613 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
74614 if (nval == 0)
74615 ret = !ret;
74616 break;
74617 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
74618 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
74619 if (info->master == NULL)
74620 return -ENOMEM;
74621 - atomic_set(&info->master->count, info->u.nth.count);
74622 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
74623
74624 return 0;
74625 }
74626 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
74627 index 1201b6d..bcff8c6 100644
74628 --- a/net/netlink/af_netlink.c
74629 +++ b/net/netlink/af_netlink.c
74630 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock *sk)
74631 sk->sk_error_report(sk);
74632 }
74633 }
74634 - atomic_inc(&sk->sk_drops);
74635 + atomic_inc_unchecked(&sk->sk_drops);
74636 }
74637
74638 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
74639 @@ -1999,7 +1999,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
74640 sk_wmem_alloc_get(s),
74641 nlk->cb,
74642 atomic_read(&s->sk_refcnt),
74643 - atomic_read(&s->sk_drops),
74644 + atomic_read_unchecked(&s->sk_drops),
74645 sock_i_ino(s)
74646 );
74647
74648 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
74649 index 732152f..60bb09e 100644
74650 --- a/net/netrom/af_netrom.c
74651 +++ b/net/netrom/af_netrom.c
74652 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
74653 struct sock *sk = sock->sk;
74654 struct nr_sock *nr = nr_sk(sk);
74655
74656 + memset(sax, 0, sizeof(*sax));
74657 lock_sock(sk);
74658 if (peer != 0) {
74659 if (sk->sk_state != TCP_ESTABLISHED) {
74660 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
74661 *uaddr_len = sizeof(struct full_sockaddr_ax25);
74662 } else {
74663 sax->fsa_ax25.sax25_family = AF_NETROM;
74664 - sax->fsa_ax25.sax25_ndigis = 0;
74665 sax->fsa_ax25.sax25_call = nr->source_addr;
74666 *uaddr_len = sizeof(struct sockaddr_ax25);
74667 }
74668 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
74669 index d9d4970..d5a6a68 100644
74670 --- a/net/packet/af_packet.c
74671 +++ b/net/packet/af_packet.c
74672 @@ -1675,7 +1675,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
74673
74674 spin_lock(&sk->sk_receive_queue.lock);
74675 po->stats.tp_packets++;
74676 - skb->dropcount = atomic_read(&sk->sk_drops);
74677 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74678 __skb_queue_tail(&sk->sk_receive_queue, skb);
74679 spin_unlock(&sk->sk_receive_queue.lock);
74680 sk->sk_data_ready(sk, skb->len);
74681 @@ -1684,7 +1684,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
74682 drop_n_acct:
74683 spin_lock(&sk->sk_receive_queue.lock);
74684 po->stats.tp_drops++;
74685 - atomic_inc(&sk->sk_drops);
74686 + atomic_inc_unchecked(&sk->sk_drops);
74687 spin_unlock(&sk->sk_receive_queue.lock);
74688
74689 drop_n_restore:
74690 @@ -3266,7 +3266,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
74691 case PACKET_HDRLEN:
74692 if (len > sizeof(int))
74693 len = sizeof(int);
74694 - if (copy_from_user(&val, optval, len))
74695 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
74696 return -EFAULT;
74697 switch (val) {
74698 case TPACKET_V1:
74699 @@ -3316,7 +3316,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
74700
74701 if (put_user(len, optlen))
74702 return -EFAULT;
74703 - if (copy_to_user(optval, data, len))
74704 + if (len > sizeof(st) || copy_to_user(optval, data, len))
74705 return -EFAULT;
74706 return 0;
74707 }
74708 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
74709 index d65f699..05aa6ce 100644
74710 --- a/net/phonet/af_phonet.c
74711 +++ b/net/phonet/af_phonet.c
74712 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
74713 {
74714 struct phonet_protocol *pp;
74715
74716 - if (protocol >= PHONET_NPROTO)
74717 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74718 return NULL;
74719
74720 rcu_read_lock();
74721 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
74722 {
74723 int err = 0;
74724
74725 - if (protocol >= PHONET_NPROTO)
74726 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74727 return -EINVAL;
74728
74729 err = proto_register(pp->prot, 1);
74730 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
74731 index 2ba6e9f..409573f 100644
74732 --- a/net/phonet/pep.c
74733 +++ b/net/phonet/pep.c
74734 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
74735
74736 case PNS_PEP_CTRL_REQ:
74737 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
74738 - atomic_inc(&sk->sk_drops);
74739 + atomic_inc_unchecked(&sk->sk_drops);
74740 break;
74741 }
74742 __skb_pull(skb, 4);
74743 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
74744 }
74745
74746 if (pn->rx_credits == 0) {
74747 - atomic_inc(&sk->sk_drops);
74748 + atomic_inc_unchecked(&sk->sk_drops);
74749 err = -ENOBUFS;
74750 break;
74751 }
74752 @@ -557,7 +557,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
74753 }
74754
74755 if (pn->rx_credits == 0) {
74756 - atomic_inc(&sk->sk_drops);
74757 + atomic_inc_unchecked(&sk->sk_drops);
74758 err = NET_RX_DROP;
74759 break;
74760 }
74761 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
74762 index 4c7eff3..59c727f 100644
74763 --- a/net/phonet/socket.c
74764 +++ b/net/phonet/socket.c
74765 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
74766 pn->resource, sk->sk_state,
74767 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
74768 sock_i_uid(sk), sock_i_ino(sk),
74769 - atomic_read(&sk->sk_refcnt), sk,
74770 - atomic_read(&sk->sk_drops), &len);
74771 + atomic_read(&sk->sk_refcnt),
74772 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74773 + NULL,
74774 +#else
74775 + sk,
74776 +#endif
74777 + atomic_read_unchecked(&sk->sk_drops), &len);
74778 }
74779 seq_printf(seq, "%*s\n", 127 - len, "");
74780 return 0;
74781 diff --git a/net/rds/cong.c b/net/rds/cong.c
74782 index e5b65ac..f3b6fb7 100644
74783 --- a/net/rds/cong.c
74784 +++ b/net/rds/cong.c
74785 @@ -78,7 +78,7 @@
74786 * finds that the saved generation number is smaller than the global generation
74787 * number, it wakes up the process.
74788 */
74789 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
74790 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
74791
74792 /*
74793 * Congestion monitoring
74794 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
74795 rdsdebug("waking map %p for %pI4\n",
74796 map, &map->m_addr);
74797 rds_stats_inc(s_cong_update_received);
74798 - atomic_inc(&rds_cong_generation);
74799 + atomic_inc_unchecked(&rds_cong_generation);
74800 if (waitqueue_active(&map->m_waitq))
74801 wake_up(&map->m_waitq);
74802 if (waitqueue_active(&rds_poll_waitq))
74803 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
74804
74805 int rds_cong_updated_since(unsigned long *recent)
74806 {
74807 - unsigned long gen = atomic_read(&rds_cong_generation);
74808 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
74809
74810 if (likely(*recent == gen))
74811 return 0;
74812 diff --git a/net/rds/ib.h b/net/rds/ib.h
74813 index edfaaaf..8c89879 100644
74814 --- a/net/rds/ib.h
74815 +++ b/net/rds/ib.h
74816 @@ -128,7 +128,7 @@ struct rds_ib_connection {
74817 /* sending acks */
74818 unsigned long i_ack_flags;
74819 #ifdef KERNEL_HAS_ATOMIC64
74820 - atomic64_t i_ack_next; /* next ACK to send */
74821 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74822 #else
74823 spinlock_t i_ack_lock; /* protect i_ack_next */
74824 u64 i_ack_next; /* next ACK to send */
74825 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
74826 index 51c8689..36c555f 100644
74827 --- a/net/rds/ib_cm.c
74828 +++ b/net/rds/ib_cm.c
74829 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
74830 /* Clear the ACK state */
74831 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74832 #ifdef KERNEL_HAS_ATOMIC64
74833 - atomic64_set(&ic->i_ack_next, 0);
74834 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74835 #else
74836 ic->i_ack_next = 0;
74837 #endif
74838 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
74839 index e29e0ca..fa3a6a3 100644
74840 --- a/net/rds/ib_recv.c
74841 +++ b/net/rds/ib_recv.c
74842 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
74843 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
74844 int ack_required)
74845 {
74846 - atomic64_set(&ic->i_ack_next, seq);
74847 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74848 if (ack_required) {
74849 smp_mb__before_clear_bit();
74850 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74851 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
74852 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74853 smp_mb__after_clear_bit();
74854
74855 - return atomic64_read(&ic->i_ack_next);
74856 + return atomic64_read_unchecked(&ic->i_ack_next);
74857 }
74858 #endif
74859
74860 diff --git a/net/rds/iw.h b/net/rds/iw.h
74861 index 04ce3b1..48119a6 100644
74862 --- a/net/rds/iw.h
74863 +++ b/net/rds/iw.h
74864 @@ -134,7 +134,7 @@ struct rds_iw_connection {
74865 /* sending acks */
74866 unsigned long i_ack_flags;
74867 #ifdef KERNEL_HAS_ATOMIC64
74868 - atomic64_t i_ack_next; /* next ACK to send */
74869 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74870 #else
74871 spinlock_t i_ack_lock; /* protect i_ack_next */
74872 u64 i_ack_next; /* next ACK to send */
74873 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
74874 index 9556d28..f046d0e 100644
74875 --- a/net/rds/iw_cm.c
74876 +++ b/net/rds/iw_cm.c
74877 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
74878 /* Clear the ACK state */
74879 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74880 #ifdef KERNEL_HAS_ATOMIC64
74881 - atomic64_set(&ic->i_ack_next, 0);
74882 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74883 #else
74884 ic->i_ack_next = 0;
74885 #endif
74886 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
74887 index 5e57347..3916042 100644
74888 --- a/net/rds/iw_recv.c
74889 +++ b/net/rds/iw_recv.c
74890 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
74891 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
74892 int ack_required)
74893 {
74894 - atomic64_set(&ic->i_ack_next, seq);
74895 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74896 if (ack_required) {
74897 smp_mb__before_clear_bit();
74898 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74899 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
74900 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74901 smp_mb__after_clear_bit();
74902
74903 - return atomic64_read(&ic->i_ack_next);
74904 + return atomic64_read_unchecked(&ic->i_ack_next);
74905 }
74906 #endif
74907
74908 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
74909 index edac9ef..16bcb98 100644
74910 --- a/net/rds/tcp.c
74911 +++ b/net/rds/tcp.c
74912 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
74913 int val = 1;
74914
74915 set_fs(KERNEL_DS);
74916 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
74917 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
74918 sizeof(val));
74919 set_fs(oldfs);
74920 }
74921 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
74922 index 1b4fd68..2234175 100644
74923 --- a/net/rds/tcp_send.c
74924 +++ b/net/rds/tcp_send.c
74925 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
74926
74927 oldfs = get_fs();
74928 set_fs(KERNEL_DS);
74929 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
74930 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
74931 sizeof(val));
74932 set_fs(oldfs);
74933 }
74934 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
74935 index 74c064c..fdec26f 100644
74936 --- a/net/rxrpc/af_rxrpc.c
74937 +++ b/net/rxrpc/af_rxrpc.c
74938 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
74939 __be32 rxrpc_epoch;
74940
74941 /* current debugging ID */
74942 -atomic_t rxrpc_debug_id;
74943 +atomic_unchecked_t rxrpc_debug_id;
74944
74945 /* count of skbs currently in use */
74946 atomic_t rxrpc_n_skbs;
74947 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
74948 index f99cfce..cc529dd 100644
74949 --- a/net/rxrpc/ar-ack.c
74950 +++ b/net/rxrpc/ar-ack.c
74951 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
74952
74953 _enter("{%d,%d,%d,%d},",
74954 call->acks_hard, call->acks_unacked,
74955 - atomic_read(&call->sequence),
74956 + atomic_read_unchecked(&call->sequence),
74957 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
74958
74959 stop = 0;
74960 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
74961
74962 /* each Tx packet has a new serial number */
74963 sp->hdr.serial =
74964 - htonl(atomic_inc_return(&call->conn->serial));
74965 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
74966
74967 hdr = (struct rxrpc_header *) txb->head;
74968 hdr->serial = sp->hdr.serial;
74969 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
74970 */
74971 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
74972 {
74973 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
74974 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
74975 }
74976
74977 /*
74978 @@ -629,7 +629,7 @@ process_further:
74979
74980 latest = ntohl(sp->hdr.serial);
74981 hard = ntohl(ack.firstPacket);
74982 - tx = atomic_read(&call->sequence);
74983 + tx = atomic_read_unchecked(&call->sequence);
74984
74985 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74986 latest,
74987 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
74988 goto maybe_reschedule;
74989
74990 send_ACK_with_skew:
74991 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
74992 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
74993 ntohl(ack.serial));
74994 send_ACK:
74995 mtu = call->conn->trans->peer->if_mtu;
74996 @@ -1173,7 +1173,7 @@ send_ACK:
74997 ackinfo.rxMTU = htonl(5692);
74998 ackinfo.jumbo_max = htonl(4);
74999
75000 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
75001 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
75002 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
75003 ntohl(hdr.serial),
75004 ntohs(ack.maxSkew),
75005 @@ -1191,7 +1191,7 @@ send_ACK:
75006 send_message:
75007 _debug("send message");
75008
75009 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
75010 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
75011 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
75012 send_message_2:
75013
75014 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
75015 index bf656c2..48f9d27 100644
75016 --- a/net/rxrpc/ar-call.c
75017 +++ b/net/rxrpc/ar-call.c
75018 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
75019 spin_lock_init(&call->lock);
75020 rwlock_init(&call->state_lock);
75021 atomic_set(&call->usage, 1);
75022 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
75023 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75024 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
75025
75026 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
75027 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
75028 index 4106ca9..a338d7a 100644
75029 --- a/net/rxrpc/ar-connection.c
75030 +++ b/net/rxrpc/ar-connection.c
75031 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
75032 rwlock_init(&conn->lock);
75033 spin_lock_init(&conn->state_lock);
75034 atomic_set(&conn->usage, 1);
75035 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
75036 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75037 conn->avail_calls = RXRPC_MAXCALLS;
75038 conn->size_align = 4;
75039 conn->header_size = sizeof(struct rxrpc_header);
75040 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
75041 index e7ed43a..6afa140 100644
75042 --- a/net/rxrpc/ar-connevent.c
75043 +++ b/net/rxrpc/ar-connevent.c
75044 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
75045
75046 len = iov[0].iov_len + iov[1].iov_len;
75047
75048 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
75049 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
75050 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
75051
75052 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
75053 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
75054 index 1a2b0633..e8d1382 100644
75055 --- a/net/rxrpc/ar-input.c
75056 +++ b/net/rxrpc/ar-input.c
75057 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
75058 /* track the latest serial number on this connection for ACK packet
75059 * information */
75060 serial = ntohl(sp->hdr.serial);
75061 - hi_serial = atomic_read(&call->conn->hi_serial);
75062 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
75063 while (serial > hi_serial)
75064 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
75065 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
75066 serial);
75067
75068 /* request ACK generation for any ACK or DATA packet that requests
75069 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
75070 index 8e22bd3..f66d1c0 100644
75071 --- a/net/rxrpc/ar-internal.h
75072 +++ b/net/rxrpc/ar-internal.h
75073 @@ -272,8 +272,8 @@ struct rxrpc_connection {
75074 int error; /* error code for local abort */
75075 int debug_id; /* debug ID for printks */
75076 unsigned call_counter; /* call ID counter */
75077 - atomic_t serial; /* packet serial number counter */
75078 - atomic_t hi_serial; /* highest serial number received */
75079 + atomic_unchecked_t serial; /* packet serial number counter */
75080 + atomic_unchecked_t hi_serial; /* highest serial number received */
75081 u8 avail_calls; /* number of calls available */
75082 u8 size_align; /* data size alignment (for security) */
75083 u8 header_size; /* rxrpc + security header size */
75084 @@ -346,7 +346,7 @@ struct rxrpc_call {
75085 spinlock_t lock;
75086 rwlock_t state_lock; /* lock for state transition */
75087 atomic_t usage;
75088 - atomic_t sequence; /* Tx data packet sequence counter */
75089 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
75090 u32 abort_code; /* local/remote abort code */
75091 enum { /* current state of call */
75092 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
75093 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
75094 */
75095 extern atomic_t rxrpc_n_skbs;
75096 extern __be32 rxrpc_epoch;
75097 -extern atomic_t rxrpc_debug_id;
75098 +extern atomic_unchecked_t rxrpc_debug_id;
75099 extern struct workqueue_struct *rxrpc_workqueue;
75100
75101 /*
75102 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
75103 index 87f7135..74d3703 100644
75104 --- a/net/rxrpc/ar-local.c
75105 +++ b/net/rxrpc/ar-local.c
75106 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
75107 spin_lock_init(&local->lock);
75108 rwlock_init(&local->services_lock);
75109 atomic_set(&local->usage, 1);
75110 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
75111 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75112 memcpy(&local->srx, srx, sizeof(*srx));
75113 }
75114
75115 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
75116 index 338d793..47391d0 100644
75117 --- a/net/rxrpc/ar-output.c
75118 +++ b/net/rxrpc/ar-output.c
75119 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
75120 sp->hdr.cid = call->cid;
75121 sp->hdr.callNumber = call->call_id;
75122 sp->hdr.seq =
75123 - htonl(atomic_inc_return(&call->sequence));
75124 + htonl(atomic_inc_return_unchecked(&call->sequence));
75125 sp->hdr.serial =
75126 - htonl(atomic_inc_return(&conn->serial));
75127 + htonl(atomic_inc_return_unchecked(&conn->serial));
75128 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
75129 sp->hdr.userStatus = 0;
75130 sp->hdr.securityIndex = conn->security_ix;
75131 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
75132 index 2754f09..b20e38f 100644
75133 --- a/net/rxrpc/ar-peer.c
75134 +++ b/net/rxrpc/ar-peer.c
75135 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
75136 INIT_LIST_HEAD(&peer->error_targets);
75137 spin_lock_init(&peer->lock);
75138 atomic_set(&peer->usage, 1);
75139 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
75140 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75141 memcpy(&peer->srx, srx, sizeof(*srx));
75142
75143 rxrpc_assess_MTU_size(peer);
75144 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
75145 index 38047f7..9f48511 100644
75146 --- a/net/rxrpc/ar-proc.c
75147 +++ b/net/rxrpc/ar-proc.c
75148 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
75149 atomic_read(&conn->usage),
75150 rxrpc_conn_states[conn->state],
75151 key_serial(conn->key),
75152 - atomic_read(&conn->serial),
75153 - atomic_read(&conn->hi_serial));
75154 + atomic_read_unchecked(&conn->serial),
75155 + atomic_read_unchecked(&conn->hi_serial));
75156
75157 return 0;
75158 }
75159 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
75160 index 92df566..87ec1bf 100644
75161 --- a/net/rxrpc/ar-transport.c
75162 +++ b/net/rxrpc/ar-transport.c
75163 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
75164 spin_lock_init(&trans->client_lock);
75165 rwlock_init(&trans->conn_lock);
75166 atomic_set(&trans->usage, 1);
75167 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
75168 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75169
75170 if (peer->srx.transport.family == AF_INET) {
75171 switch (peer->srx.transport_type) {
75172 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
75173 index 7635107..4670276 100644
75174 --- a/net/rxrpc/rxkad.c
75175 +++ b/net/rxrpc/rxkad.c
75176 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
75177
75178 len = iov[0].iov_len + iov[1].iov_len;
75179
75180 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
75181 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
75182 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
75183
75184 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
75185 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
75186
75187 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
75188
75189 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
75190 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
75191 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
75192
75193 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
75194 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
75195 index 1e2eee8..ce3967e 100644
75196 --- a/net/sctp/proc.c
75197 +++ b/net/sctp/proc.c
75198 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
75199 seq_printf(seq,
75200 "%8pK %8pK %-3d %-3d %-2d %-4d "
75201 "%4d %8d %8d %7d %5lu %-5d %5d ",
75202 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
75203 + assoc, sk,
75204 + sctp_sk(sk)->type, sk->sk_state,
75205 assoc->state, hash,
75206 assoc->assoc_id,
75207 assoc->sndbuf_used,
75208 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
75209 index 54a7cd2..944edae 100644
75210 --- a/net/sctp/socket.c
75211 +++ b/net/sctp/socket.c
75212 @@ -4574,7 +4574,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
75213 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
75214 if (space_left < addrlen)
75215 return -ENOMEM;
75216 - if (copy_to_user(to, &temp, addrlen))
75217 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
75218 return -EFAULT;
75219 to += addrlen;
75220 cnt++;
75221 diff --git a/net/socket.c b/net/socket.c
75222 index 2dce67a..1e91168 100644
75223 --- a/net/socket.c
75224 +++ b/net/socket.c
75225 @@ -88,6 +88,7 @@
75226 #include <linux/nsproxy.h>
75227 #include <linux/magic.h>
75228 #include <linux/slab.h>
75229 +#include <linux/in.h>
75230
75231 #include <asm/uaccess.h>
75232 #include <asm/unistd.h>
75233 @@ -105,6 +106,8 @@
75234 #include <linux/sockios.h>
75235 #include <linux/atalk.h>
75236
75237 +#include <linux/grsock.h>
75238 +
75239 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
75240 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
75241 unsigned long nr_segs, loff_t pos);
75242 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
75243 &sockfs_dentry_operations, SOCKFS_MAGIC);
75244 }
75245
75246 -static struct vfsmount *sock_mnt __read_mostly;
75247 +struct vfsmount *sock_mnt __read_mostly;
75248
75249 static struct file_system_type sock_fs_type = {
75250 .name = "sockfs",
75251 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
75252 return -EAFNOSUPPORT;
75253 if (type < 0 || type >= SOCK_MAX)
75254 return -EINVAL;
75255 + if (protocol < 0)
75256 + return -EINVAL;
75257
75258 /* Compatibility.
75259
75260 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
75261 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
75262 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
75263
75264 + if(!gr_search_socket(family, type, protocol)) {
75265 + retval = -EACCES;
75266 + goto out;
75267 + }
75268 +
75269 + if (gr_handle_sock_all(family, type, protocol)) {
75270 + retval = -EACCES;
75271 + goto out;
75272 + }
75273 +
75274 retval = sock_create(family, type, protocol, &sock);
75275 if (retval < 0)
75276 goto out;
75277 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
75278 if (sock) {
75279 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
75280 if (err >= 0) {
75281 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
75282 + err = -EACCES;
75283 + goto error;
75284 + }
75285 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
75286 + if (err)
75287 + goto error;
75288 +
75289 err = security_socket_bind(sock,
75290 (struct sockaddr *)&address,
75291 addrlen);
75292 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
75293 (struct sockaddr *)
75294 &address, addrlen);
75295 }
75296 +error:
75297 fput_light(sock->file, fput_needed);
75298 }
75299 return err;
75300 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
75301 if ((unsigned)backlog > somaxconn)
75302 backlog = somaxconn;
75303
75304 + if (gr_handle_sock_server_other(sock->sk)) {
75305 + err = -EPERM;
75306 + goto error;
75307 + }
75308 +
75309 + err = gr_search_listen(sock);
75310 + if (err)
75311 + goto error;
75312 +
75313 err = security_socket_listen(sock, backlog);
75314 if (!err)
75315 err = sock->ops->listen(sock, backlog);
75316
75317 +error:
75318 fput_light(sock->file, fput_needed);
75319 }
75320 return err;
75321 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
75322 newsock->type = sock->type;
75323 newsock->ops = sock->ops;
75324
75325 + if (gr_handle_sock_server_other(sock->sk)) {
75326 + err = -EPERM;
75327 + sock_release(newsock);
75328 + goto out_put;
75329 + }
75330 +
75331 + err = gr_search_accept(sock);
75332 + if (err) {
75333 + sock_release(newsock);
75334 + goto out_put;
75335 + }
75336 +
75337 /*
75338 * We don't need try_module_get here, as the listening socket (sock)
75339 * has the protocol module (sock->ops->owner) held.
75340 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
75341 fd_install(newfd, newfile);
75342 err = newfd;
75343
75344 + gr_attach_curr_ip(newsock->sk);
75345 +
75346 out_put:
75347 fput_light(sock->file, fput_needed);
75348 out:
75349 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
75350 int, addrlen)
75351 {
75352 struct socket *sock;
75353 + struct sockaddr *sck;
75354 struct sockaddr_storage address;
75355 int err, fput_needed;
75356
75357 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
75358 if (err < 0)
75359 goto out_put;
75360
75361 + sck = (struct sockaddr *)&address;
75362 +
75363 + if (gr_handle_sock_client(sck)) {
75364 + err = -EACCES;
75365 + goto out_put;
75366 + }
75367 +
75368 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
75369 + if (err)
75370 + goto out_put;
75371 +
75372 err =
75373 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
75374 if (err)
75375 @@ -1950,7 +2010,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
75376 * checking falls down on this.
75377 */
75378 if (copy_from_user(ctl_buf,
75379 - (void __user __force *)msg_sys->msg_control,
75380 + (void __force_user *)msg_sys->msg_control,
75381 ctl_len))
75382 goto out_freectl;
75383 msg_sys->msg_control = ctl_buf;
75384 @@ -2120,7 +2180,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
75385 * kernel msghdr to use the kernel address space)
75386 */
75387
75388 - uaddr = (__force void __user *)msg_sys->msg_name;
75389 + uaddr = (void __force_user *)msg_sys->msg_name;
75390 uaddr_len = COMPAT_NAMELEN(msg);
75391 if (MSG_CMSG_COMPAT & flags) {
75392 err = verify_compat_iovec(msg_sys, iov,
75393 @@ -2748,7 +2808,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
75394 }
75395
75396 ifr = compat_alloc_user_space(buf_size);
75397 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
75398 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
75399
75400 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
75401 return -EFAULT;
75402 @@ -2772,12 +2832,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
75403 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
75404
75405 if (copy_in_user(rxnfc, compat_rxnfc,
75406 - (void *)(&rxnfc->fs.m_ext + 1) -
75407 - (void *)rxnfc) ||
75408 + (void __user *)(&rxnfc->fs.m_ext + 1) -
75409 + (void __user *)rxnfc) ||
75410 copy_in_user(&rxnfc->fs.ring_cookie,
75411 &compat_rxnfc->fs.ring_cookie,
75412 - (void *)(&rxnfc->fs.location + 1) -
75413 - (void *)&rxnfc->fs.ring_cookie) ||
75414 + (void __user *)(&rxnfc->fs.location + 1) -
75415 + (void __user *)&rxnfc->fs.ring_cookie) ||
75416 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
75417 sizeof(rxnfc->rule_cnt)))
75418 return -EFAULT;
75419 @@ -2789,12 +2849,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
75420
75421 if (convert_out) {
75422 if (copy_in_user(compat_rxnfc, rxnfc,
75423 - (const void *)(&rxnfc->fs.m_ext + 1) -
75424 - (const void *)rxnfc) ||
75425 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
75426 + (const void __user *)rxnfc) ||
75427 copy_in_user(&compat_rxnfc->fs.ring_cookie,
75428 &rxnfc->fs.ring_cookie,
75429 - (const void *)(&rxnfc->fs.location + 1) -
75430 - (const void *)&rxnfc->fs.ring_cookie) ||
75431 + (const void __user *)(&rxnfc->fs.location + 1) -
75432 + (const void __user *)&rxnfc->fs.ring_cookie) ||
75433 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
75434 sizeof(rxnfc->rule_cnt)))
75435 return -EFAULT;
75436 @@ -2864,7 +2924,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
75437 old_fs = get_fs();
75438 set_fs(KERNEL_DS);
75439 err = dev_ioctl(net, cmd,
75440 - (struct ifreq __user __force *) &kifr);
75441 + (struct ifreq __force_user *) &kifr);
75442 set_fs(old_fs);
75443
75444 return err;
75445 @@ -2973,7 +3033,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
75446
75447 old_fs = get_fs();
75448 set_fs(KERNEL_DS);
75449 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
75450 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
75451 set_fs(old_fs);
75452
75453 if (cmd == SIOCGIFMAP && !err) {
75454 @@ -3078,7 +3138,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
75455 ret |= __get_user(rtdev, &(ur4->rt_dev));
75456 if (rtdev) {
75457 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
75458 - r4.rt_dev = (char __user __force *)devname;
75459 + r4.rt_dev = (char __force_user *)devname;
75460 devname[15] = 0;
75461 } else
75462 r4.rt_dev = NULL;
75463 @@ -3318,8 +3378,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
75464 int __user *uoptlen;
75465 int err;
75466
75467 - uoptval = (char __user __force *) optval;
75468 - uoptlen = (int __user __force *) optlen;
75469 + uoptval = (char __force_user *) optval;
75470 + uoptlen = (int __force_user *) optlen;
75471
75472 set_fs(KERNEL_DS);
75473 if (level == SOL_SOCKET)
75474 @@ -3339,7 +3399,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
75475 char __user *uoptval;
75476 int err;
75477
75478 - uoptval = (char __user __force *) optval;
75479 + uoptval = (char __force_user *) optval;
75480
75481 set_fs(KERNEL_DS);
75482 if (level == SOL_SOCKET)
75483 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
75484 index 00a1a2a..6a0138a 100644
75485 --- a/net/sunrpc/sched.c
75486 +++ b/net/sunrpc/sched.c
75487 @@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *word)
75488 #ifdef RPC_DEBUG
75489 static void rpc_task_set_debuginfo(struct rpc_task *task)
75490 {
75491 - static atomic_t rpc_pid;
75492 + static atomic_unchecked_t rpc_pid;
75493
75494 - task->tk_pid = atomic_inc_return(&rpc_pid);
75495 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
75496 }
75497 #else
75498 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
75499 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
75500 index 71bed1c..5dff36d 100644
75501 --- a/net/sunrpc/svcsock.c
75502 +++ b/net/sunrpc/svcsock.c
75503 @@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
75504 int buflen, unsigned int base)
75505 {
75506 size_t save_iovlen;
75507 - void __user *save_iovbase;
75508 + void *save_iovbase;
75509 unsigned int i;
75510 int ret;
75511
75512 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
75513 index 09af4fa..77110a9 100644
75514 --- a/net/sunrpc/xprtrdma/svc_rdma.c
75515 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
75516 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
75517 static unsigned int min_max_inline = 4096;
75518 static unsigned int max_max_inline = 65536;
75519
75520 -atomic_t rdma_stat_recv;
75521 -atomic_t rdma_stat_read;
75522 -atomic_t rdma_stat_write;
75523 -atomic_t rdma_stat_sq_starve;
75524 -atomic_t rdma_stat_rq_starve;
75525 -atomic_t rdma_stat_rq_poll;
75526 -atomic_t rdma_stat_rq_prod;
75527 -atomic_t rdma_stat_sq_poll;
75528 -atomic_t rdma_stat_sq_prod;
75529 +atomic_unchecked_t rdma_stat_recv;
75530 +atomic_unchecked_t rdma_stat_read;
75531 +atomic_unchecked_t rdma_stat_write;
75532 +atomic_unchecked_t rdma_stat_sq_starve;
75533 +atomic_unchecked_t rdma_stat_rq_starve;
75534 +atomic_unchecked_t rdma_stat_rq_poll;
75535 +atomic_unchecked_t rdma_stat_rq_prod;
75536 +atomic_unchecked_t rdma_stat_sq_poll;
75537 +atomic_unchecked_t rdma_stat_sq_prod;
75538
75539 /* Temporary NFS request map and context caches */
75540 struct kmem_cache *svc_rdma_map_cachep;
75541 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
75542 len -= *ppos;
75543 if (len > *lenp)
75544 len = *lenp;
75545 - if (len && copy_to_user(buffer, str_buf, len))
75546 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
75547 return -EFAULT;
75548 *lenp = len;
75549 *ppos += len;
75550 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
75551 {
75552 .procname = "rdma_stat_read",
75553 .data = &rdma_stat_read,
75554 - .maxlen = sizeof(atomic_t),
75555 + .maxlen = sizeof(atomic_unchecked_t),
75556 .mode = 0644,
75557 .proc_handler = read_reset_stat,
75558 },
75559 {
75560 .procname = "rdma_stat_recv",
75561 .data = &rdma_stat_recv,
75562 - .maxlen = sizeof(atomic_t),
75563 + .maxlen = sizeof(atomic_unchecked_t),
75564 .mode = 0644,
75565 .proc_handler = read_reset_stat,
75566 },
75567 {
75568 .procname = "rdma_stat_write",
75569 .data = &rdma_stat_write,
75570 - .maxlen = sizeof(atomic_t),
75571 + .maxlen = sizeof(atomic_unchecked_t),
75572 .mode = 0644,
75573 .proc_handler = read_reset_stat,
75574 },
75575 {
75576 .procname = "rdma_stat_sq_starve",
75577 .data = &rdma_stat_sq_starve,
75578 - .maxlen = sizeof(atomic_t),
75579 + .maxlen = sizeof(atomic_unchecked_t),
75580 .mode = 0644,
75581 .proc_handler = read_reset_stat,
75582 },
75583 {
75584 .procname = "rdma_stat_rq_starve",
75585 .data = &rdma_stat_rq_starve,
75586 - .maxlen = sizeof(atomic_t),
75587 + .maxlen = sizeof(atomic_unchecked_t),
75588 .mode = 0644,
75589 .proc_handler = read_reset_stat,
75590 },
75591 {
75592 .procname = "rdma_stat_rq_poll",
75593 .data = &rdma_stat_rq_poll,
75594 - .maxlen = sizeof(atomic_t),
75595 + .maxlen = sizeof(atomic_unchecked_t),
75596 .mode = 0644,
75597 .proc_handler = read_reset_stat,
75598 },
75599 {
75600 .procname = "rdma_stat_rq_prod",
75601 .data = &rdma_stat_rq_prod,
75602 - .maxlen = sizeof(atomic_t),
75603 + .maxlen = sizeof(atomic_unchecked_t),
75604 .mode = 0644,
75605 .proc_handler = read_reset_stat,
75606 },
75607 {
75608 .procname = "rdma_stat_sq_poll",
75609 .data = &rdma_stat_sq_poll,
75610 - .maxlen = sizeof(atomic_t),
75611 + .maxlen = sizeof(atomic_unchecked_t),
75612 .mode = 0644,
75613 .proc_handler = read_reset_stat,
75614 },
75615 {
75616 .procname = "rdma_stat_sq_prod",
75617 .data = &rdma_stat_sq_prod,
75618 - .maxlen = sizeof(atomic_t),
75619 + .maxlen = sizeof(atomic_unchecked_t),
75620 .mode = 0644,
75621 .proc_handler = read_reset_stat,
75622 },
75623 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75624 index df67211..c354b13 100644
75625 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75626 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75627 @@ -499,7 +499,7 @@ next_sge:
75628 svc_rdma_put_context(ctxt, 0);
75629 goto out;
75630 }
75631 - atomic_inc(&rdma_stat_read);
75632 + atomic_inc_unchecked(&rdma_stat_read);
75633
75634 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
75635 chl_map->ch[ch_no].count -= read_wr.num_sge;
75636 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
75637 dto_q);
75638 list_del_init(&ctxt->dto_q);
75639 } else {
75640 - atomic_inc(&rdma_stat_rq_starve);
75641 + atomic_inc_unchecked(&rdma_stat_rq_starve);
75642 clear_bit(XPT_DATA, &xprt->xpt_flags);
75643 ctxt = NULL;
75644 }
75645 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
75646 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
75647 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
75648 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
75649 - atomic_inc(&rdma_stat_recv);
75650 + atomic_inc_unchecked(&rdma_stat_recv);
75651
75652 /* Build up the XDR from the receive buffers. */
75653 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
75654 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75655 index 249a835..fb2794b 100644
75656 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75657 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75658 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
75659 write_wr.wr.rdma.remote_addr = to;
75660
75661 /* Post It */
75662 - atomic_inc(&rdma_stat_write);
75663 + atomic_inc_unchecked(&rdma_stat_write);
75664 if (svc_rdma_send(xprt, &write_wr))
75665 goto err;
75666 return 0;
75667 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
75668 index ba1296d..0fec1a5 100644
75669 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
75670 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
75671 @@ -300,7 +300,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
75672 return;
75673
75674 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
75675 - atomic_inc(&rdma_stat_rq_poll);
75676 + atomic_inc_unchecked(&rdma_stat_rq_poll);
75677
75678 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
75679 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
75680 @@ -322,7 +322,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
75681 }
75682
75683 if (ctxt)
75684 - atomic_inc(&rdma_stat_rq_prod);
75685 + atomic_inc_unchecked(&rdma_stat_rq_prod);
75686
75687 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
75688 /*
75689 @@ -394,7 +394,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
75690 return;
75691
75692 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
75693 - atomic_inc(&rdma_stat_sq_poll);
75694 + atomic_inc_unchecked(&rdma_stat_sq_poll);
75695 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
75696 if (wc.status != IB_WC_SUCCESS)
75697 /* Close the transport */
75698 @@ -412,7 +412,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
75699 }
75700
75701 if (ctxt)
75702 - atomic_inc(&rdma_stat_sq_prod);
75703 + atomic_inc_unchecked(&rdma_stat_sq_prod);
75704 }
75705
75706 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
75707 @@ -1274,7 +1274,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
75708 spin_lock_bh(&xprt->sc_lock);
75709 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
75710 spin_unlock_bh(&xprt->sc_lock);
75711 - atomic_inc(&rdma_stat_sq_starve);
75712 + atomic_inc_unchecked(&rdma_stat_sq_starve);
75713
75714 /* See if we can opportunistically reap SQ WR to make room */
75715 sq_cq_reap(xprt);
75716 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
75717 index e758139..d29ea47 100644
75718 --- a/net/sysctl_net.c
75719 +++ b/net/sysctl_net.c
75720 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
75721 struct ctl_table *table)
75722 {
75723 /* Allow network administrator to have same access as root. */
75724 - if (capable(CAP_NET_ADMIN)) {
75725 + if (capable_nolog(CAP_NET_ADMIN)) {
75726 int mode = (table->mode >> 6) & 7;
75727 return (mode << 6) | (mode << 3) | mode;
75728 }
75729 diff --git a/net/tipc/link.c b/net/tipc/link.c
75730 index ae98a72..7bb6056 100644
75731 --- a/net/tipc/link.c
75732 +++ b/net/tipc/link.c
75733 @@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
75734 struct tipc_msg fragm_hdr;
75735 struct sk_buff *buf, *buf_chain, *prev;
75736 u32 fragm_crs, fragm_rest, hsz, sect_rest;
75737 - const unchar *sect_crs;
75738 + const unchar __user *sect_crs;
75739 int curr_sect;
75740 u32 fragm_no;
75741
75742 @@ -1247,7 +1247,7 @@ again:
75743
75744 if (!sect_rest) {
75745 sect_rest = msg_sect[++curr_sect].iov_len;
75746 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
75747 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
75748 }
75749
75750 if (sect_rest < fragm_rest)
75751 @@ -1266,7 +1266,7 @@ error:
75752 }
75753 } else
75754 skb_copy_to_linear_data_offset(buf, fragm_crs,
75755 - sect_crs, sz);
75756 + (const void __force_kernel *)sect_crs, sz);
75757 sect_crs += sz;
75758 sect_rest -= sz;
75759 fragm_crs += sz;
75760 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
75761 index 83d5096..dcba497 100644
75762 --- a/net/tipc/msg.c
75763 +++ b/net/tipc/msg.c
75764 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
75765 msg_sect[cnt].iov_len);
75766 else
75767 skb_copy_to_linear_data_offset(*buf, pos,
75768 - msg_sect[cnt].iov_base,
75769 + (const void __force_kernel *)msg_sect[cnt].iov_base,
75770 msg_sect[cnt].iov_len);
75771 pos += msg_sect[cnt].iov_len;
75772 }
75773 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
75774 index 1983717..4d6102c 100644
75775 --- a/net/tipc/subscr.c
75776 +++ b/net/tipc/subscr.c
75777 @@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub,
75778 {
75779 struct iovec msg_sect;
75780
75781 - msg_sect.iov_base = (void *)&sub->evt;
75782 + msg_sect.iov_base = (void __force_user *)&sub->evt;
75783 msg_sect.iov_len = sizeof(struct tipc_event);
75784
75785 sub->evt.event = htohl(event, sub->swap);
75786 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
75787 index d99678a..3514a21 100644
75788 --- a/net/unix/af_unix.c
75789 +++ b/net/unix/af_unix.c
75790 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(struct net *net,
75791 err = -ECONNREFUSED;
75792 if (!S_ISSOCK(inode->i_mode))
75793 goto put_fail;
75794 +
75795 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
75796 + err = -EACCES;
75797 + goto put_fail;
75798 + }
75799 +
75800 u = unix_find_socket_byinode(inode);
75801 if (!u)
75802 goto put_fail;
75803 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(struct net *net,
75804 if (u) {
75805 struct dentry *dentry;
75806 dentry = unix_sk(u)->dentry;
75807 +
75808 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
75809 + err = -EPERM;
75810 + sock_put(u);
75811 + goto fail;
75812 + }
75813 +
75814 if (dentry)
75815 touch_atime(unix_sk(u)->mnt, dentry);
75816 } else
75817 @@ -869,11 +882,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
75818 err = security_path_mknod(&path, dentry, mode, 0);
75819 if (err)
75820 goto out_mknod_drop_write;
75821 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
75822 + err = -EACCES;
75823 + goto out_mknod_drop_write;
75824 + }
75825 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
75826 out_mknod_drop_write:
75827 mnt_drop_write(path.mnt);
75828 if (err)
75829 goto out_mknod_dput;
75830 +
75831 + gr_handle_create(dentry, path.mnt);
75832 +
75833 mutex_unlock(&path.dentry->d_inode->i_mutex);
75834 dput(path.dentry);
75835 path.dentry = dentry;
75836 diff --git a/net/wireless/core.h b/net/wireless/core.h
75837 index b9ec306..b4a563e 100644
75838 --- a/net/wireless/core.h
75839 +++ b/net/wireless/core.h
75840 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
75841 struct mutex mtx;
75842
75843 /* rfkill support */
75844 - struct rfkill_ops rfkill_ops;
75845 + rfkill_ops_no_const rfkill_ops;
75846 struct rfkill *rfkill;
75847 struct work_struct rfkill_sync;
75848
75849 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
75850 index 0af7f54..c916d2f 100644
75851 --- a/net/wireless/wext-core.c
75852 +++ b/net/wireless/wext-core.c
75853 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
75854 */
75855
75856 /* Support for very large requests */
75857 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
75858 - (user_length > descr->max_tokens)) {
75859 + if (user_length > descr->max_tokens) {
75860 /* Allow userspace to GET more than max so
75861 * we can support any size GET requests.
75862 * There is still a limit : -ENOMEM.
75863 @@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
75864 }
75865 }
75866
75867 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
75868 - /*
75869 - * If this is a GET, but not NOMAX, it means that the extra
75870 - * data is not bounded by userspace, but by max_tokens. Thus
75871 - * set the length to max_tokens. This matches the extra data
75872 - * allocation.
75873 - * The driver should fill it with the number of tokens it
75874 - * provided, and it may check iwp->length rather than having
75875 - * knowledge of max_tokens. If the driver doesn't change the
75876 - * iwp->length, this ioctl just copies back max_token tokens
75877 - * filled with zeroes. Hopefully the driver isn't claiming
75878 - * them to be valid data.
75879 - */
75880 - iwp->length = descr->max_tokens;
75881 - }
75882 -
75883 err = handler(dev, info, (union iwreq_data *) iwp, extra);
75884
75885 iwp->length += essid_compat;
75886 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
75887 index 9049a5c..cfa6f5c 100644
75888 --- a/net/xfrm/xfrm_policy.c
75889 +++ b/net/xfrm/xfrm_policy.c
75890 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
75891 {
75892 policy->walk.dead = 1;
75893
75894 - atomic_inc(&policy->genid);
75895 + atomic_inc_unchecked(&policy->genid);
75896
75897 if (del_timer(&policy->timer))
75898 xfrm_pol_put(policy);
75899 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
75900 hlist_add_head(&policy->bydst, chain);
75901 xfrm_pol_hold(policy);
75902 net->xfrm.policy_count[dir]++;
75903 - atomic_inc(&flow_cache_genid);
75904 + atomic_inc_unchecked(&flow_cache_genid);
75905 if (delpol)
75906 __xfrm_policy_unlink(delpol, dir);
75907 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
75908 @@ -1530,7 +1530,7 @@ free_dst:
75909 goto out;
75910 }
75911
75912 -static int inline
75913 +static inline int
75914 xfrm_dst_alloc_copy(void **target, const void *src, int size)
75915 {
75916 if (!*target) {
75917 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
75918 return 0;
75919 }
75920
75921 -static int inline
75922 +static inline int
75923 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75924 {
75925 #ifdef CONFIG_XFRM_SUB_POLICY
75926 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75927 #endif
75928 }
75929
75930 -static int inline
75931 +static inline int
75932 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
75933 {
75934 #ifdef CONFIG_XFRM_SUB_POLICY
75935 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
75936
75937 xdst->num_pols = num_pols;
75938 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
75939 - xdst->policy_genid = atomic_read(&pols[0]->genid);
75940 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
75941
75942 return xdst;
75943 }
75944 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
75945 if (xdst->xfrm_genid != dst->xfrm->genid)
75946 return 0;
75947 if (xdst->num_pols > 0 &&
75948 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
75949 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
75950 return 0;
75951
75952 mtu = dst_mtu(dst->child);
75953 @@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
75954 sizeof(pol->xfrm_vec[i].saddr));
75955 pol->xfrm_vec[i].encap_family = mp->new_family;
75956 /* flush bundles */
75957 - atomic_inc(&pol->genid);
75958 + atomic_inc_unchecked(&pol->genid);
75959 }
75960 }
75961
75962 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
75963 index d2b366c..51ff91ebc 100644
75964 --- a/scripts/Makefile.build
75965 +++ b/scripts/Makefile.build
75966 @@ -109,7 +109,7 @@ endif
75967 endif
75968
75969 # Do not include host rules unless needed
75970 -ifneq ($(hostprogs-y)$(hostprogs-m),)
75971 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
75972 include scripts/Makefile.host
75973 endif
75974
75975 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
75976 index 686cb0d..9d653bf 100644
75977 --- a/scripts/Makefile.clean
75978 +++ b/scripts/Makefile.clean
75979 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
75980 __clean-files := $(extra-y) $(always) \
75981 $(targets) $(clean-files) \
75982 $(host-progs) \
75983 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
75984 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
75985 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
75986
75987 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
75988
75989 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
75990 index 1ac414f..a1c1451 100644
75991 --- a/scripts/Makefile.host
75992 +++ b/scripts/Makefile.host
75993 @@ -31,6 +31,7 @@
75994 # Note: Shared libraries consisting of C++ files are not supported
75995
75996 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
75997 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
75998
75999 # C code
76000 # Executables compiled from a single .c file
76001 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
76002 # Shared libaries (only .c supported)
76003 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
76004 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
76005 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
76006 # Remove .so files from "xxx-objs"
76007 host-cobjs := $(filter-out %.so,$(host-cobjs))
76008
76009 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
76010 index cb1f50c..cef2a7c 100644
76011 --- a/scripts/basic/fixdep.c
76012 +++ b/scripts/basic/fixdep.c
76013 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
76014 /*
76015 * Lookup a value in the configuration string.
76016 */
76017 -static int is_defined_config(const char *name, int len, unsigned int hash)
76018 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
76019 {
76020 struct item *aux;
76021
76022 @@ -211,10 +211,10 @@ static void clear_config(void)
76023 /*
76024 * Record the use of a CONFIG_* word.
76025 */
76026 -static void use_config(const char *m, int slen)
76027 +static void use_config(const char *m, unsigned int slen)
76028 {
76029 unsigned int hash = strhash(m, slen);
76030 - int c, i;
76031 + unsigned int c, i;
76032
76033 if (is_defined_config(m, slen, hash))
76034 return;
76035 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
76036
76037 static void parse_config_file(const char *map, size_t len)
76038 {
76039 - const int *end = (const int *) (map + len);
76040 + const unsigned int *end = (const unsigned int *) (map + len);
76041 /* start at +1, so that p can never be < map */
76042 - const int *m = (const int *) map + 1;
76043 + const unsigned int *m = (const unsigned int *) map + 1;
76044 const char *p, *q;
76045
76046 for (; m < end; m++) {
76047 @@ -406,7 +406,7 @@ static void print_deps(void)
76048 static void traps(void)
76049 {
76050 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
76051 - int *p = (int *)test;
76052 + unsigned int *p = (unsigned int *)test;
76053
76054 if (*p != INT_CONF) {
76055 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
76056 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
76057 new file mode 100644
76058 index 0000000..8729101
76059 --- /dev/null
76060 +++ b/scripts/gcc-plugin.sh
76061 @@ -0,0 +1,2 @@
76062 +#!/bin/sh
76063 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
76064 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
76065 index f936d1f..a66d95f 100644
76066 --- a/scripts/mod/file2alias.c
76067 +++ b/scripts/mod/file2alias.c
76068 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
76069 unsigned long size, unsigned long id_size,
76070 void *symval)
76071 {
76072 - int i;
76073 + unsigned int i;
76074
76075 if (size % id_size || size < id_size) {
76076 if (cross_build != 0)
76077 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
76078 /* USB is special because the bcdDevice can be matched against a numeric range */
76079 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
76080 static void do_usb_entry(struct usb_device_id *id,
76081 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
76082 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
76083 unsigned char range_lo, unsigned char range_hi,
76084 unsigned char max, struct module *mod)
76085 {
76086 @@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
76087 {
76088 unsigned int devlo, devhi;
76089 unsigned char chi, clo, max;
76090 - int ndigits;
76091 + unsigned int ndigits;
76092
76093 id->match_flags = TO_NATIVE(id->match_flags);
76094 id->idVendor = TO_NATIVE(id->idVendor);
76095 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
76096 for (i = 0; i < count; i++) {
76097 const char *id = (char *)devs[i].id;
76098 char acpi_id[sizeof(devs[0].id)];
76099 - int j;
76100 + unsigned int j;
76101
76102 buf_printf(&mod->dev_table_buf,
76103 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
76104 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
76105
76106 for (j = 0; j < PNP_MAX_DEVICES; j++) {
76107 const char *id = (char *)card->devs[j].id;
76108 - int i2, j2;
76109 + unsigned int i2, j2;
76110 int dup = 0;
76111
76112 if (!id[0])
76113 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
76114 /* add an individual alias for every device entry */
76115 if (!dup) {
76116 char acpi_id[sizeof(card->devs[0].id)];
76117 - int k;
76118 + unsigned int k;
76119
76120 buf_printf(&mod->dev_table_buf,
76121 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
76122 @@ -807,7 +807,7 @@ static void dmi_ascii_filter(char *d, const char *s)
76123 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
76124 char *alias)
76125 {
76126 - int i, j;
76127 + unsigned int i, j;
76128
76129 sprintf(alias, "dmi*");
76130
76131 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
76132 index 2bd594e..d43245e 100644
76133 --- a/scripts/mod/modpost.c
76134 +++ b/scripts/mod/modpost.c
76135 @@ -919,6 +919,7 @@ enum mismatch {
76136 ANY_INIT_TO_ANY_EXIT,
76137 ANY_EXIT_TO_ANY_INIT,
76138 EXPORT_TO_INIT_EXIT,
76139 + DATA_TO_TEXT
76140 };
76141
76142 struct sectioncheck {
76143 @@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = {
76144 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
76145 .mismatch = EXPORT_TO_INIT_EXIT,
76146 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
76147 +},
76148 +/* Do not reference code from writable data */
76149 +{
76150 + .fromsec = { DATA_SECTIONS, NULL },
76151 + .tosec = { TEXT_SECTIONS, NULL },
76152 + .mismatch = DATA_TO_TEXT
76153 }
76154 };
76155
76156 @@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
76157 continue;
76158 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
76159 continue;
76160 - if (sym->st_value == addr)
76161 - return sym;
76162 /* Find a symbol nearby - addr are maybe negative */
76163 d = sym->st_value - addr;
76164 + if (d == 0)
76165 + return sym;
76166 if (d < 0)
76167 d = addr - sym->st_value;
76168 if (d < distance) {
76169 @@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname,
76170 tosym, prl_to, prl_to, tosym);
76171 free(prl_to);
76172 break;
76173 + case DATA_TO_TEXT:
76174 +/*
76175 + fprintf(stderr,
76176 + "The variable %s references\n"
76177 + "the %s %s%s%s\n",
76178 + fromsym, to, sec2annotation(tosec), tosym, to_p);
76179 +*/
76180 + break;
76181 }
76182 fprintf(stderr, "\n");
76183 }
76184 @@ -1656,7 +1671,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
76185 static void check_sec_ref(struct module *mod, const char *modname,
76186 struct elf_info *elf)
76187 {
76188 - int i;
76189 + unsigned int i;
76190 Elf_Shdr *sechdrs = elf->sechdrs;
76191
76192 /* Walk through all sections */
76193 @@ -1754,7 +1769,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
76194 va_end(ap);
76195 }
76196
76197 -void buf_write(struct buffer *buf, const char *s, int len)
76198 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
76199 {
76200 if (buf->size - buf->pos < len) {
76201 buf->size += len + SZ;
76202 @@ -1972,7 +1987,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
76203 if (fstat(fileno(file), &st) < 0)
76204 goto close_write;
76205
76206 - if (st.st_size != b->pos)
76207 + if (st.st_size != (off_t)b->pos)
76208 goto close_write;
76209
76210 tmp = NOFAIL(malloc(b->pos));
76211 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
76212 index 2031119..b5433af 100644
76213 --- a/scripts/mod/modpost.h
76214 +++ b/scripts/mod/modpost.h
76215 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
76216
76217 struct buffer {
76218 char *p;
76219 - int pos;
76220 - int size;
76221 + unsigned int pos;
76222 + unsigned int size;
76223 };
76224
76225 void __attribute__((format(printf, 2, 3)))
76226 buf_printf(struct buffer *buf, const char *fmt, ...);
76227
76228 void
76229 -buf_write(struct buffer *buf, const char *s, int len);
76230 +buf_write(struct buffer *buf, const char *s, unsigned int len);
76231
76232 struct module {
76233 struct module *next;
76234 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
76235 index 9dfcd6d..099068e 100644
76236 --- a/scripts/mod/sumversion.c
76237 +++ b/scripts/mod/sumversion.c
76238 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
76239 goto out;
76240 }
76241
76242 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
76243 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
76244 warn("writing sum in %s failed: %s\n",
76245 filename, strerror(errno));
76246 goto out;
76247 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
76248 index 5c11312..72742b5 100644
76249 --- a/scripts/pnmtologo.c
76250 +++ b/scripts/pnmtologo.c
76251 @@ -237,14 +237,14 @@ static void write_header(void)
76252 fprintf(out, " * Linux logo %s\n", logoname);
76253 fputs(" */\n\n", out);
76254 fputs("#include <linux/linux_logo.h>\n\n", out);
76255 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
76256 + fprintf(out, "static unsigned char %s_data[] = {\n",
76257 logoname);
76258 }
76259
76260 static void write_footer(void)
76261 {
76262 fputs("\n};\n\n", out);
76263 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
76264 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
76265 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
76266 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
76267 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
76268 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
76269 fputs("\n};\n\n", out);
76270
76271 /* write logo clut */
76272 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
76273 + fprintf(out, "static unsigned char %s_clut[] = {\n",
76274 logoname);
76275 write_hex_cnt = 0;
76276 for (i = 0; i < logo_clutsize; i++) {
76277 diff --git a/security/Kconfig b/security/Kconfig
76278 index 51bd5a0..eeabc9f 100644
76279 --- a/security/Kconfig
76280 +++ b/security/Kconfig
76281 @@ -4,6 +4,627 @@
76282
76283 menu "Security options"
76284
76285 +source grsecurity/Kconfig
76286 +
76287 +menu "PaX"
76288 +
76289 + config ARCH_TRACK_EXEC_LIMIT
76290 + bool
76291 +
76292 + config PAX_KERNEXEC_PLUGIN
76293 + bool
76294 +
76295 + config PAX_PER_CPU_PGD
76296 + bool
76297 +
76298 + config TASK_SIZE_MAX_SHIFT
76299 + int
76300 + depends on X86_64
76301 + default 47 if !PAX_PER_CPU_PGD
76302 + default 42 if PAX_PER_CPU_PGD
76303 +
76304 + config PAX_ENABLE_PAE
76305 + bool
76306 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
76307 +
76308 +config PAX
76309 + bool "Enable various PaX features"
76310 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
76311 + help
76312 + This allows you to enable various PaX features. PaX adds
76313 + intrusion prevention mechanisms to the kernel that reduce
76314 + the risks posed by exploitable memory corruption bugs.
76315 +
76316 +menu "PaX Control"
76317 + depends on PAX
76318 +
76319 +config PAX_SOFTMODE
76320 + bool 'Support soft mode'
76321 + help
76322 + Enabling this option will allow you to run PaX in soft mode, that
76323 + is, PaX features will not be enforced by default, only on executables
76324 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
76325 + support as they are the only way to mark executables for soft mode use.
76326 +
76327 + Soft mode can be activated by using the "pax_softmode=1" kernel command
76328 + line option on boot. Furthermore you can control various PaX features
76329 + at runtime via the entries in /proc/sys/kernel/pax.
76330 +
76331 +config PAX_EI_PAX
76332 + bool 'Use legacy ELF header marking'
76333 + help
76334 + Enabling this option will allow you to control PaX features on
76335 + a per executable basis via the 'chpax' utility available at
76336 + http://pax.grsecurity.net/. The control flags will be read from
76337 + an otherwise reserved part of the ELF header. This marking has
76338 + numerous drawbacks (no support for soft-mode, toolchain does not
76339 + know about the non-standard use of the ELF header) therefore it
76340 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
76341 + support.
76342 +
76343 + If you have applications not marked by the PT_PAX_FLAGS ELF program
76344 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
76345 + option otherwise they will not get any protection.
76346 +
76347 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
76348 + support as well, they will override the legacy EI_PAX marks.
76349 +
76350 +config PAX_PT_PAX_FLAGS
76351 + bool 'Use ELF program header marking'
76352 + help
76353 + Enabling this option will allow you to control PaX features on
76354 + a per executable basis via the 'paxctl' utility available at
76355 + http://pax.grsecurity.net/. The control flags will be read from
76356 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
76357 + has the benefits of supporting both soft mode and being fully
76358 + integrated into the toolchain (the binutils patch is available
76359 + from http://pax.grsecurity.net).
76360 +
76361 + If you have applications not marked by the PT_PAX_FLAGS ELF program
76362 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
76363 + support otherwise they will not get any protection.
76364 +
76365 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
76366 + must make sure that the marks are the same if a binary has both marks.
76367 +
76368 + Note that if you enable the legacy EI_PAX marking support as well,
76369 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
76370 +
76371 +config PAX_XATTR_PAX_FLAGS
76372 + bool 'Use filesystem extended attributes marking'
76373 + depends on EXPERT
76374 + select CIFS_XATTR if CIFS
76375 + select EXT2_FS_XATTR if EXT2_FS
76376 + select EXT3_FS_XATTR if EXT3_FS
76377 + select EXT4_FS_XATTR if EXT4_FS
76378 + select JFFS2_FS_XATTR if JFFS2_FS
76379 + select REISERFS_FS_XATTR if REISERFS_FS
76380 + select SQUASHFS_XATTR if SQUASHFS
76381 + select TMPFS_XATTR if TMPFS
76382 + select UBIFS_FS_XATTR if UBIFS_FS
76383 + help
76384 + Enabling this option will allow you to control PaX features on
76385 + a per executable basis via the 'setfattr' utility. The control
76386 + flags will be read from the user.pax.flags extended attribute of
76387 + the file. This marking has the benefit of supporting binary-only
76388 + applications that self-check themselves (e.g., skype) and would
76389 + not tolerate chpax/paxctl changes. The main drawback is that
76390 + extended attributes are not supported by some filesystems (e.g.,
76391 + isofs, udf, vfat) so copying files through such filesystems will
76392 + lose the extended attributes and these PaX markings.
76393 +
76394 + If you have applications not marked by the PT_PAX_FLAGS ELF program
76395 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
76396 + support otherwise they will not get any protection.
76397 +
76398 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
76399 + must make sure that the marks are the same if a binary has both marks.
76400 +
76401 + Note that if you enable the legacy EI_PAX marking support as well,
76402 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
76403 +
76404 +choice
76405 + prompt 'MAC system integration'
76406 + default PAX_HAVE_ACL_FLAGS
76407 + help
76408 + Mandatory Access Control systems have the option of controlling
76409 + PaX flags on a per executable basis, choose the method supported
76410 + by your particular system.
76411 +
76412 + - "none": if your MAC system does not interact with PaX,
76413 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
76414 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
76415 +
76416 + NOTE: this option is for developers/integrators only.
76417 +
76418 + config PAX_NO_ACL_FLAGS
76419 + bool 'none'
76420 +
76421 + config PAX_HAVE_ACL_FLAGS
76422 + bool 'direct'
76423 +
76424 + config PAX_HOOK_ACL_FLAGS
76425 + bool 'hook'
76426 +endchoice
76427 +
76428 +endmenu
76429 +
76430 +menu "Non-executable pages"
76431 + depends on PAX
76432 +
76433 +config PAX_NOEXEC
76434 + bool "Enforce non-executable pages"
76435 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
76436 + help
76437 + By design some architectures do not allow for protecting memory
76438 + pages against execution or even if they do, Linux does not make
76439 + use of this feature. In practice this means that if a page is
76440 + readable (such as the stack or heap) it is also executable.
76441 +
76442 + There is a well known exploit technique that makes use of this
76443 + fact and a common programming mistake where an attacker can
76444 + introduce code of his choice somewhere in the attacked program's
76445 + memory (typically the stack or the heap) and then execute it.
76446 +
76447 + If the attacked program was running with different (typically
76448 + higher) privileges than that of the attacker, then he can elevate
76449 + his own privilege level (e.g. get a root shell, write to files for
76450 + which he does not have write access to, etc).
76451 +
76452 + Enabling this option will let you choose from various features
76453 + that prevent the injection and execution of 'foreign' code in
76454 + a program.
76455 +
76456 + This will also break programs that rely on the old behaviour and
76457 + expect that dynamically allocated memory via the malloc() family
76458 + of functions is executable (which it is not). Notable examples
76459 + are the XFree86 4.x server, the java runtime and wine.
76460 +
76461 +config PAX_PAGEEXEC
76462 + bool "Paging based non-executable pages"
76463 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
76464 + select S390_SWITCH_AMODE if S390
76465 + select S390_EXEC_PROTECT if S390
76466 + select ARCH_TRACK_EXEC_LIMIT if X86_32
76467 + help
76468 + This implementation is based on the paging feature of the CPU.
76469 + On i386 without hardware non-executable bit support there is a
76470 + variable but usually low performance impact, however on Intel's
76471 + P4 core based CPUs it is very high so you should not enable this
76472 + for kernels meant to be used on such CPUs.
76473 +
76474 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
76475 + with hardware non-executable bit support there is no performance
76476 + impact, on ppc the impact is negligible.
76477 +
76478 + Note that several architectures require various emulations due to
76479 + badly designed userland ABIs, this will cause a performance impact
76480 + but will disappear as soon as userland is fixed. For example, ppc
76481 + userland MUST have been built with secure-plt by a recent toolchain.
76482 +
76483 +config PAX_SEGMEXEC
76484 + bool "Segmentation based non-executable pages"
76485 + depends on PAX_NOEXEC && X86_32
76486 + help
76487 + This implementation is based on the segmentation feature of the
76488 + CPU and has a very small performance impact, however applications
76489 + will be limited to a 1.5 GB address space instead of the normal
76490 + 3 GB.
76491 +
76492 +config PAX_EMUTRAMP
76493 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
76494 + default y if PARISC
76495 + help
76496 + There are some programs and libraries that for one reason or
76497 + another attempt to execute special small code snippets from
76498 + non-executable memory pages. Most notable examples are the
76499 + signal handler return code generated by the kernel itself and
76500 + the GCC trampolines.
76501 +
76502 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
76503 + such programs will no longer work under your kernel.
76504 +
76505 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
76506 + utilities to enable trampoline emulation for the affected programs
76507 + yet still have the protection provided by the non-executable pages.
76508 +
76509 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
76510 + your system will not even boot.
76511 +
76512 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
76513 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
76514 + for the affected files.
76515 +
76516 + NOTE: enabling this feature *may* open up a loophole in the
76517 + protection provided by non-executable pages that an attacker
76518 + could abuse. Therefore the best solution is to not have any
76519 + files on your system that would require this option. This can
76520 + be achieved by not using libc5 (which relies on the kernel
76521 + signal handler return code) and not using or rewriting programs
76522 + that make use of the nested function implementation of GCC.
76523 + Skilled users can just fix GCC itself so that it implements
76524 + nested function calls in a way that does not interfere with PaX.
76525 +
76526 +config PAX_EMUSIGRT
76527 + bool "Automatically emulate sigreturn trampolines"
76528 + depends on PAX_EMUTRAMP && PARISC
76529 + default y
76530 + help
76531 + Enabling this option will have the kernel automatically detect
76532 + and emulate signal return trampolines executing on the stack
76533 + that would otherwise lead to task termination.
76534 +
76535 + This solution is intended as a temporary one for users with
76536 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
76537 + Modula-3 runtime, etc) or executables linked to such, basically
76538 + everything that does not specify its own SA_RESTORER function in
76539 + normal executable memory like glibc 2.1+ does.
76540 +
76541 + On parisc you MUST enable this option, otherwise your system will
76542 + not even boot.
76543 +
76544 + NOTE: this feature cannot be disabled on a per executable basis
76545 + and since it *does* open up a loophole in the protection provided
76546 + by non-executable pages, the best solution is to not have any
76547 + files on your system that would require this option.
76548 +
76549 +config PAX_MPROTECT
76550 + bool "Restrict mprotect()"
76551 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
76552 + help
76553 + Enabling this option will prevent programs from
76554 + - changing the executable status of memory pages that were
76555 + not originally created as executable,
76556 + - making read-only executable pages writable again,
76557 + - creating executable pages from anonymous memory,
76558 + - making read-only-after-relocations (RELRO) data pages writable again.
76559 +
76560 + You should say Y here to complete the protection provided by
76561 + the enforcement of non-executable pages.
76562 +
76563 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76564 + this feature on a per file basis.
76565 +
76566 +config PAX_MPROTECT_COMPAT
76567 + bool "Use legacy/compat protection demoting (read help)"
76568 + depends on PAX_MPROTECT
76569 + default n
76570 + help
76571 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
76572 + by sending the proper error code to the application. For some broken
76573 + userland, this can cause problems with Python or other applications. The
76574 + current implementation however allows for applications like clamav to
76575 + detect if JIT compilation/execution is allowed and to fall back gracefully
76576 + to an interpreter-based mode if it does not. While we encourage everyone
76577 + to use the current implementation as-is and push upstream to fix broken
76578 + userland (note that the RWX logging option can assist with this), in some
76579 + environments this may not be possible. Having to disable MPROTECT
76580 + completely on certain binaries reduces the security benefit of PaX,
76581 + so this option is provided for those environments to revert to the old
76582 + behavior.
76583 +
76584 +config PAX_ELFRELOCS
76585 + bool "Allow ELF text relocations (read help)"
76586 + depends on PAX_MPROTECT
76587 + default n
76588 + help
76589 + Non-executable pages and mprotect() restrictions are effective
76590 + in preventing the introduction of new executable code into an
76591 + attacked task's address space. There remain only two venues
76592 + for this kind of attack: if the attacker can execute already
76593 + existing code in the attacked task then he can either have it
76594 + create and mmap() a file containing his code or have it mmap()
76595 + an already existing ELF library that does not have position
76596 + independent code in it and use mprotect() on it to make it
76597 + writable and copy his code there. While protecting against
76598 + the former approach is beyond PaX, the latter can be prevented
76599 + by having only PIC ELF libraries on one's system (which do not
76600 + need to relocate their code). If you are sure this is your case,
76601 + as is the case with all modern Linux distributions, then leave
76602 + this option disabled. You should say 'n' here.
76603 +
76604 +config PAX_ETEXECRELOCS
76605 + bool "Allow ELF ET_EXEC text relocations"
76606 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
76607 + select PAX_ELFRELOCS
76608 + default y
76609 + help
76610 + On some architectures there are incorrectly created applications
76611 + that require text relocations and would not work without enabling
76612 + this option. If you are an alpha, ia64 or parisc user, you should
76613 + enable this option and disable it once you have made sure that
76614 + none of your applications need it.
76615 +
76616 +config PAX_EMUPLT
76617 + bool "Automatically emulate ELF PLT"
76618 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
76619 + default y
76620 + help
76621 + Enabling this option will have the kernel automatically detect
76622 + and emulate the Procedure Linkage Table entries in ELF files.
76623 + On some architectures such entries are in writable memory, and
76624 + become non-executable leading to task termination. Therefore
76625 + it is mandatory that you enable this option on alpha, parisc,
76626 + sparc and sparc64, otherwise your system would not even boot.
76627 +
76628 + NOTE: this feature *does* open up a loophole in the protection
76629 + provided by the non-executable pages, therefore the proper
76630 + solution is to modify the toolchain to produce a PLT that does
76631 + not need to be writable.
76632 +
76633 +config PAX_DLRESOLVE
76634 + bool 'Emulate old glibc resolver stub'
76635 + depends on PAX_EMUPLT && SPARC
76636 + default n
76637 + help
76638 + This option is needed if userland has an old glibc (before 2.4)
76639 + that puts a 'save' instruction into the runtime generated resolver
76640 + stub that needs special emulation.
76641 +
76642 +config PAX_KERNEXEC
76643 + bool "Enforce non-executable kernel pages"
76644 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
76645 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
76646 + select PAX_KERNEXEC_PLUGIN if X86_64
76647 + help
76648 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
76649 + that is, enabling this option will make it harder to inject
76650 + and execute 'foreign' code in kernel memory itself.
76651 +
76652 + Note that on x86_64 kernels there is a known regression when
76653 + this feature and KVM/VMX are both enabled in the host kernel.
76654 +
76655 +choice
76656 + prompt "Return Address Instrumentation Method"
76657 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
76658 + depends on PAX_KERNEXEC_PLUGIN
76659 + help
76660 + Select the method used to instrument function pointer dereferences.
76661 + Note that binary modules cannot be instrumented by this approach.
76662 +
76663 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
76664 + bool "bts"
76665 + help
76666 + This method is compatible with binary only modules but has
76667 + a higher runtime overhead.
76668 +
76669 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
76670 + bool "or"
76671 + depends on !PARAVIRT
76672 + help
76673 + This method is incompatible with binary only modules but has
76674 + a lower runtime overhead.
76675 +endchoice
76676 +
76677 +config PAX_KERNEXEC_PLUGIN_METHOD
76678 + string
76679 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
76680 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
76681 + default ""
76682 +
76683 +config PAX_KERNEXEC_MODULE_TEXT
76684 + int "Minimum amount of memory reserved for module code"
76685 + default "4"
76686 + depends on PAX_KERNEXEC && X86_32 && MODULES
76687 + help
76688 + Due to implementation details the kernel must reserve a fixed
76689 + amount of memory for module code at compile time that cannot be
76690 + changed at runtime. Here you can specify the minimum amount
76691 + in MB that will be reserved. Due to the same implementation
76692 + details this size will always be rounded up to the next 2/4 MB
76693 + boundary (depends on PAE) so the actually available memory for
76694 + module code will usually be more than this minimum.
76695 +
76696 + The default 4 MB should be enough for most users but if you have
76697 + an excessive number of modules (e.g., most distribution configs
76698 + compile many drivers as modules) or use huge modules such as
76699 + nvidia's kernel driver, you will need to adjust this amount.
76700 + A good rule of thumb is to look at your currently loaded kernel
76701 + modules and add up their sizes.
76702 +
76703 +endmenu
76704 +
76705 +menu "Address Space Layout Randomization"
76706 + depends on PAX
76707 +
76708 +config PAX_ASLR
76709 + bool "Address Space Layout Randomization"
76710 + help
76711 + Many if not most exploit techniques rely on the knowledge of
76712 + certain addresses in the attacked program. The following options
76713 + will allow the kernel to apply a certain amount of randomization
76714 + to specific parts of the program thereby forcing an attacker to
76715 + guess them in most cases. Any failed guess will most likely crash
76716 + the attacked program which allows the kernel to detect such attempts
76717 + and react on them. PaX itself provides no reaction mechanisms,
76718 + instead it is strongly encouraged that you make use of Nergal's
76719 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
76720 + (http://www.grsecurity.net/) built-in crash detection features or
76721 + develop one yourself.
76722 +
76723 + By saying Y here you can choose to randomize the following areas:
76724 + - top of the task's kernel stack
76725 + - top of the task's userland stack
76726 + - base address for mmap() requests that do not specify one
76727 + (this includes all libraries)
76728 + - base address of the main executable
76729 +
76730 + It is strongly recommended to say Y here as address space layout
76731 + randomization has negligible impact on performance yet it provides
76732 + a very effective protection.
76733 +
76734 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76735 + this feature on a per file basis.
76736 +
76737 +config PAX_RANDKSTACK
76738 + bool "Randomize kernel stack base"
76739 + depends on X86_TSC && X86
76740 + help
76741 + By saying Y here the kernel will randomize every task's kernel
76742 + stack on every system call. This will not only force an attacker
76743 + to guess it but also prevent him from making use of possible
76744 + leaked information about it.
76745 +
76746 + Since the kernel stack is a rather scarce resource, randomization
76747 + may cause unexpected stack overflows, therefore you should very
76748 + carefully test your system. Note that once enabled in the kernel
76749 + configuration, this feature cannot be disabled on a per file basis.
76750 +
76751 +config PAX_RANDUSTACK
76752 + bool "Randomize user stack base"
76753 + depends on PAX_ASLR
76754 + help
76755 + By saying Y here the kernel will randomize every task's userland
76756 + stack. The randomization is done in two steps where the second
76757 + one may apply a big amount of shift to the top of the stack and
76758 + cause problems for programs that want to use lots of memory (more
76759 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
76760 + For this reason the second step can be controlled by 'chpax' or
76761 + 'paxctl' on a per file basis.
76762 +
76763 +config PAX_RANDMMAP
76764 + bool "Randomize mmap() base"
76765 + depends on PAX_ASLR
76766 + help
76767 + By saying Y here the kernel will use a randomized base address for
76768 + mmap() requests that do not specify one themselves. As a result
76769 + all dynamically loaded libraries will appear at random addresses
76770 + and therefore be harder to exploit by a technique where an attacker
76771 + attempts to execute library code for his purposes (e.g. spawn a
76772 + shell from an exploited program that is running at an elevated
76773 + privilege level).
76774 +
76775 + Furthermore, if a program is relinked as a dynamic ELF file, its
76776 + base address will be randomized as well, completing the full
76777 + randomization of the address space layout. Attacking such programs
76778 + becomes a guess game. You can find an example of doing this at
76779 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
76780 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
76781 +
76782 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
76783 + feature on a per file basis.
76784 +
76785 +endmenu
76786 +
76787 +menu "Miscellaneous hardening features"
76788 +
76789 +config PAX_MEMORY_SANITIZE
76790 + bool "Sanitize all freed memory"
76791 + depends on !HIBERNATION
76792 + help
76793 + By saying Y here the kernel will erase memory pages as soon as they
76794 + are freed. This in turn reduces the lifetime of data stored in the
76795 + pages, making it less likely that sensitive information such as
76796 + passwords, cryptographic secrets, etc stay in memory for too long.
76797 +
76798 + This is especially useful for programs whose runtime is short, long
76799 + lived processes and the kernel itself benefit from this as long as
76800 + they operate on whole memory pages and ensure timely freeing of pages
76801 + that may hold sensitive information.
76802 +
76803 + The tradeoff is performance impact, on a single CPU system kernel
76804 + compilation sees a 3% slowdown, other systems and workloads may vary
76805 + and you are advised to test this feature on your expected workload
76806 + before deploying it.
76807 +
76808 + Note that this feature does not protect data stored in live pages,
76809 + e.g., process memory swapped to disk may stay there for a long time.
76810 +
76811 +config PAX_MEMORY_STACKLEAK
76812 + bool "Sanitize kernel stack"
76813 + depends on X86
76814 + help
76815 + By saying Y here the kernel will erase the kernel stack before it
76816 + returns from a system call. This in turn reduces the information
76817 + that a kernel stack leak bug can reveal.
76818 +
76819 + Note that such a bug can still leak information that was put on
76820 + the stack by the current system call (the one eventually triggering
76821 + the bug) but traces of earlier system calls on the kernel stack
76822 + cannot leak anymore.
76823 +
76824 + The tradeoff is performance impact: on a single CPU system kernel
76825 + compilation sees a 1% slowdown, other systems and workloads may vary
76826 + and you are advised to test this feature on your expected workload
76827 + before deploying it.
76828 +
76829 + Note: full support for this feature requires gcc with plugin support
76830 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
76831 + versions means that functions with large enough stack frames may
76832 + leave uninitialized memory behind that may be exposed to a later
76833 + syscall leaking the stack.
76834 +
76835 +config PAX_MEMORY_UDEREF
76836 + bool "Prevent invalid userland pointer dereference"
76837 + depends on X86 && !UML_X86 && !XEN
76838 + select PAX_PER_CPU_PGD if X86_64
76839 + help
76840 + By saying Y here the kernel will be prevented from dereferencing
76841 + userland pointers in contexts where the kernel expects only kernel
76842 + pointers. This is both a useful runtime debugging feature and a
76843 + security measure that prevents exploiting a class of kernel bugs.
76844 +
76845 + The tradeoff is that some virtualization solutions may experience
76846 + a huge slowdown and therefore you should not enable this feature
76847 + for kernels meant to run in such environments. Whether a given VM
76848 + solution is affected or not is best determined by simply trying it
76849 + out, the performance impact will be obvious right on boot as this
76850 + mechanism engages from very early on. A good rule of thumb is that
76851 + VMs running on CPUs without hardware virtualization support (i.e.,
76852 + the majority of IA-32 CPUs) will likely experience the slowdown.
76853 +
76854 +config PAX_REFCOUNT
76855 + bool "Prevent various kernel object reference counter overflows"
76856 + depends on GRKERNSEC && (X86 || SPARC64)
76857 + help
76858 + By saying Y here the kernel will detect and prevent overflowing
76859 + various (but not all) kinds of object reference counters. Such
76860 + overflows can normally occur due to bugs only and are often, if
76861 + not always, exploitable.
76862 +
76863 + The tradeoff is that data structures protected by an overflowed
76864 + refcount will never be freed and therefore will leak memory. Note
76865 + that this leak also happens even without this protection but in
76866 + that case the overflow can eventually trigger the freeing of the
76867 + data structure while it is still being used elsewhere, resulting
76868 + in the exploitable situation that this feature prevents.
76869 +
76870 + Since this has a negligible performance impact, you should enable
76871 + this feature.
76872 +
76873 +config PAX_USERCOPY
76874 + bool "Harden heap object copies between kernel and userland"
76875 + depends on X86 || PPC || SPARC || ARM
76876 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
76877 + help
76878 + By saying Y here the kernel will enforce the size of heap objects
76879 + when they are copied in either direction between the kernel and
76880 + userland, even if only a part of the heap object is copied.
76881 +
76882 + Specifically, this checking prevents information leaking from the
76883 + kernel heap during kernel to userland copies (if the kernel heap
76884 + object is otherwise fully initialized) and prevents kernel heap
76885 + overflows during userland to kernel copies.
76886 +
76887 + Note that the current implementation provides the strictest bounds
76888 + checks for the SLUB allocator.
76889 +
76890 + Enabling this option also enables per-slab cache protection against
76891 + data in a given cache being copied into/out of via userland
76892 + accessors. Though the whitelist of regions will be reduced over
76893 + time, it notably protects important data structures like task structs.
76894 +
76895 + If frame pointers are enabled on x86, this option will also restrict
76896 + copies into and out of the kernel stack to local variables within a
76897 + single frame.
76898 +
76899 + Since this has a negligible performance impact, you should enable
76900 + this feature.
76901 +
76902 +endmenu
76903 +
76904 +endmenu
76905 +
76906 config KEYS
76907 bool "Enable access key retention support"
76908 help
76909 @@ -169,7 +790,7 @@ config INTEL_TXT
76910 config LSM_MMAP_MIN_ADDR
76911 int "Low address space for LSM to protect from user allocation"
76912 depends on SECURITY && SECURITY_SELINUX
76913 - default 32768 if ARM
76914 + default 32768 if ALPHA || ARM || PARISC || SPARC32
76915 default 65536
76916 help
76917 This is the portion of low virtual memory which should be protected
76918 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
76919 index 3783202..1852837 100644
76920 --- a/security/apparmor/lsm.c
76921 +++ b/security/apparmor/lsm.c
76922 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
76923 return error;
76924 }
76925
76926 -static struct security_operations apparmor_ops = {
76927 +static struct security_operations apparmor_ops __read_only = {
76928 .name = "apparmor",
76929
76930 .ptrace_access_check = apparmor_ptrace_access_check,
76931 diff --git a/security/commoncap.c b/security/commoncap.c
76932 index ee4f848..a320c64 100644
76933 --- a/security/commoncap.c
76934 +++ b/security/commoncap.c
76935 @@ -28,6 +28,7 @@
76936 #include <linux/prctl.h>
76937 #include <linux/securebits.h>
76938 #include <linux/user_namespace.h>
76939 +#include <net/sock.h>
76940
76941 /*
76942 * If a non-root user executes a setuid-root binary in
76943 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
76944
76945 int cap_netlink_recv(struct sk_buff *skb, int cap)
76946 {
76947 - if (!cap_raised(current_cap(), cap))
76948 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
76949 return -EPERM;
76950 return 0;
76951 }
76952 @@ -579,6 +580,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
76953 {
76954 const struct cred *cred = current_cred();
76955
76956 + if (gr_acl_enable_at_secure())
76957 + return 1;
76958 +
76959 if (cred->uid != 0) {
76960 if (bprm->cap_effective)
76961 return 1;
76962 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
76963 index 3ccf7ac..d73ad64 100644
76964 --- a/security/integrity/ima/ima.h
76965 +++ b/security/integrity/ima/ima.h
76966 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
76967 extern spinlock_t ima_queue_lock;
76968
76969 struct ima_h_table {
76970 - atomic_long_t len; /* number of stored measurements in the list */
76971 - atomic_long_t violations;
76972 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
76973 + atomic_long_unchecked_t violations;
76974 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
76975 };
76976 extern struct ima_h_table ima_htable;
76977 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
76978 index 88a2788..581ab92 100644
76979 --- a/security/integrity/ima/ima_api.c
76980 +++ b/security/integrity/ima/ima_api.c
76981 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
76982 int result;
76983
76984 /* can overflow, only indicator */
76985 - atomic_long_inc(&ima_htable.violations);
76986 + atomic_long_inc_unchecked(&ima_htable.violations);
76987
76988 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
76989 if (!entry) {
76990 diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/ima/ima_audit.c
76991 index c5c5a72..2ad942f 100644
76992 --- a/security/integrity/ima/ima_audit.c
76993 +++ b/security/integrity/ima/ima_audit.c
76994 @@ -56,9 +56,11 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
76995 audit_log_format(ab, " name=");
76996 audit_log_untrustedstring(ab, fname);
76997 }
76998 - if (inode)
76999 - audit_log_format(ab, " dev=%s ino=%lu",
77000 - inode->i_sb->s_id, inode->i_ino);
77001 + if (inode) {
77002 + audit_log_format(ab, " dev=");
77003 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
77004 + audit_log_format(ab, " ino=%lu", inode->i_ino);
77005 + }
77006 audit_log_format(ab, " res=%d", !result ? 0 : 1);
77007 audit_log_end(ab);
77008 }
77009 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
77010 index e1aa2b4..52027bf 100644
77011 --- a/security/integrity/ima/ima_fs.c
77012 +++ b/security/integrity/ima/ima_fs.c
77013 @@ -28,12 +28,12 @@
77014 static int valid_policy = 1;
77015 #define TMPBUFLEN 12
77016 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
77017 - loff_t *ppos, atomic_long_t *val)
77018 + loff_t *ppos, atomic_long_unchecked_t *val)
77019 {
77020 char tmpbuf[TMPBUFLEN];
77021 ssize_t len;
77022
77023 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
77024 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
77025 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
77026 }
77027
77028 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
77029 index 55a6271..ad829c3 100644
77030 --- a/security/integrity/ima/ima_queue.c
77031 +++ b/security/integrity/ima/ima_queue.c
77032 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
77033 INIT_LIST_HEAD(&qe->later);
77034 list_add_tail_rcu(&qe->later, &ima_measurements);
77035
77036 - atomic_long_inc(&ima_htable.len);
77037 + atomic_long_inc_unchecked(&ima_htable.len);
77038 key = ima_hash_key(entry->digest);
77039 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
77040 return 0;
77041 diff --git a/security/keys/compat.c b/security/keys/compat.c
77042 index 4c48e13..7abdac9 100644
77043 --- a/security/keys/compat.c
77044 +++ b/security/keys/compat.c
77045 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
77046 if (ret == 0)
77047 goto no_payload_free;
77048
77049 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
77050 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
77051
77052 if (iov != iovstack)
77053 kfree(iov);
77054 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
77055 index 0b3f5d7..892c8a6 100644
77056 --- a/security/keys/keyctl.c
77057 +++ b/security/keys/keyctl.c
77058 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
77059 /*
77060 * Copy the iovec data from userspace
77061 */
77062 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
77063 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
77064 unsigned ioc)
77065 {
77066 for (; ioc > 0; ioc--) {
77067 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
77068 * If successful, 0 will be returned.
77069 */
77070 long keyctl_instantiate_key_common(key_serial_t id,
77071 - const struct iovec *payload_iov,
77072 + const struct iovec __user *payload_iov,
77073 unsigned ioc,
77074 size_t plen,
77075 key_serial_t ringid)
77076 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
77077 [0].iov_len = plen
77078 };
77079
77080 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
77081 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
77082 }
77083
77084 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
77085 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
77086 if (ret == 0)
77087 goto no_payload_free;
77088
77089 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
77090 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
77091
77092 if (iov != iovstack)
77093 kfree(iov);
77094 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
77095 index 37a7f3b..86dc19f 100644
77096 --- a/security/keys/keyring.c
77097 +++ b/security/keys/keyring.c
77098 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
77099 ret = -EFAULT;
77100
77101 for (loop = 0; loop < klist->nkeys; loop++) {
77102 + key_serial_t serial;
77103 key = klist->keys[loop];
77104 + serial = key->serial;
77105
77106 tmp = sizeof(key_serial_t);
77107 if (tmp > buflen)
77108 tmp = buflen;
77109
77110 - if (copy_to_user(buffer,
77111 - &key->serial,
77112 - tmp) != 0)
77113 + if (copy_to_user(buffer, &serial, tmp))
77114 goto error;
77115
77116 buflen -= tmp;
77117 diff --git a/security/lsm_audit.c b/security/lsm_audit.c
77118 index 893af8a..ba9237c 100644
77119 --- a/security/lsm_audit.c
77120 +++ b/security/lsm_audit.c
77121 @@ -234,10 +234,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
77122 audit_log_d_path(ab, "path=", &a->u.path);
77123
77124 inode = a->u.path.dentry->d_inode;
77125 - if (inode)
77126 - audit_log_format(ab, " dev=%s ino=%lu",
77127 - inode->i_sb->s_id,
77128 - inode->i_ino);
77129 + if (inode) {
77130 + audit_log_format(ab, " dev=");
77131 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
77132 + audit_log_format(ab, " ino=%lu", inode->i_ino);
77133 + }
77134 break;
77135 }
77136 case LSM_AUDIT_DATA_DENTRY: {
77137 @@ -247,10 +248,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
77138 audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
77139
77140 inode = a->u.dentry->d_inode;
77141 - if (inode)
77142 - audit_log_format(ab, " dev=%s ino=%lu",
77143 - inode->i_sb->s_id,
77144 - inode->i_ino);
77145 + if (inode) {
77146 + audit_log_format(ab, " dev=");
77147 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
77148 + audit_log_format(ab, " ino=%lu", inode->i_ino);
77149 + }
77150 break;
77151 }
77152 case LSM_AUDIT_DATA_INODE: {
77153 @@ -265,8 +267,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
77154 dentry->d_name.name);
77155 dput(dentry);
77156 }
77157 - audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id,
77158 - inode->i_ino);
77159 + audit_log_format(ab, " dev=");
77160 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
77161 + audit_log_format(ab, " ino=%lu", inode->i_ino);
77162 break;
77163 }
77164 case LSM_AUDIT_DATA_TASK:
77165 diff --git a/security/min_addr.c b/security/min_addr.c
77166 index f728728..6457a0c 100644
77167 --- a/security/min_addr.c
77168 +++ b/security/min_addr.c
77169 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
77170 */
77171 static void update_mmap_min_addr(void)
77172 {
77173 +#ifndef SPARC
77174 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
77175 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
77176 mmap_min_addr = dac_mmap_min_addr;
77177 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
77178 #else
77179 mmap_min_addr = dac_mmap_min_addr;
77180 #endif
77181 +#endif
77182 }
77183
77184 /*
77185 diff --git a/security/security.c b/security/security.c
77186 index e2f684a..8d62ef5 100644
77187 --- a/security/security.c
77188 +++ b/security/security.c
77189 @@ -26,8 +26,8 @@
77190 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
77191 CONFIG_DEFAULT_SECURITY;
77192
77193 -static struct security_operations *security_ops;
77194 -static struct security_operations default_security_ops = {
77195 +static struct security_operations *security_ops __read_only;
77196 +static struct security_operations default_security_ops __read_only = {
77197 .name = "default",
77198 };
77199
77200 @@ -68,7 +68,9 @@ int __init security_init(void)
77201
77202 void reset_security_ops(void)
77203 {
77204 + pax_open_kernel();
77205 security_ops = &default_security_ops;
77206 + pax_close_kernel();
77207 }
77208
77209 /* Save user chosen LSM */
77210 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
77211 index 1126c10..effb32b 100644
77212 --- a/security/selinux/hooks.c
77213 +++ b/security/selinux/hooks.c
77214 @@ -94,8 +94,6 @@
77215
77216 #define NUM_SEL_MNT_OPTS 5
77217
77218 -extern struct security_operations *security_ops;
77219 -
77220 /* SECMARK reference count */
77221 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
77222
77223 @@ -5449,7 +5447,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
77224
77225 #endif
77226
77227 -static struct security_operations selinux_ops = {
77228 +static struct security_operations selinux_ops __read_only = {
77229 .name = "selinux",
77230
77231 .ptrace_access_check = selinux_ptrace_access_check,
77232 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
77233 index b43813c..74be837 100644
77234 --- a/security/selinux/include/xfrm.h
77235 +++ b/security/selinux/include/xfrm.h
77236 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
77237
77238 static inline void selinux_xfrm_notify_policyload(void)
77239 {
77240 - atomic_inc(&flow_cache_genid);
77241 + atomic_inc_unchecked(&flow_cache_genid);
77242 }
77243 #else
77244 static inline int selinux_xfrm_enabled(void)
77245 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
77246 index 7db62b4..ee4d949 100644
77247 --- a/security/smack/smack_lsm.c
77248 +++ b/security/smack/smack_lsm.c
77249 @@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
77250 return 0;
77251 }
77252
77253 -struct security_operations smack_ops = {
77254 +struct security_operations smack_ops __read_only = {
77255 .name = "smack",
77256
77257 .ptrace_access_check = smack_ptrace_access_check,
77258 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
77259 index 4b327b6..646c57a 100644
77260 --- a/security/tomoyo/tomoyo.c
77261 +++ b/security/tomoyo/tomoyo.c
77262 @@ -504,7 +504,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
77263 * tomoyo_security_ops is a "struct security_operations" which is used for
77264 * registering TOMOYO.
77265 */
77266 -static struct security_operations tomoyo_security_ops = {
77267 +static struct security_operations tomoyo_security_ops __read_only = {
77268 .name = "tomoyo",
77269 .cred_alloc_blank = tomoyo_cred_alloc_blank,
77270 .cred_prepare = tomoyo_cred_prepare,
77271 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
77272 index 762af68..7103453 100644
77273 --- a/sound/aoa/codecs/onyx.c
77274 +++ b/sound/aoa/codecs/onyx.c
77275 @@ -54,7 +54,7 @@ struct onyx {
77276 spdif_locked:1,
77277 analog_locked:1,
77278 original_mute:2;
77279 - int open_count;
77280 + local_t open_count;
77281 struct codec_info *codec_info;
77282
77283 /* mutex serializes concurrent access to the device
77284 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
77285 struct onyx *onyx = cii->codec_data;
77286
77287 mutex_lock(&onyx->mutex);
77288 - onyx->open_count++;
77289 + local_inc(&onyx->open_count);
77290 mutex_unlock(&onyx->mutex);
77291
77292 return 0;
77293 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
77294 struct onyx *onyx = cii->codec_data;
77295
77296 mutex_lock(&onyx->mutex);
77297 - onyx->open_count--;
77298 - if (!onyx->open_count)
77299 + if (local_dec_and_test(&onyx->open_count))
77300 onyx->spdif_locked = onyx->analog_locked = 0;
77301 mutex_unlock(&onyx->mutex);
77302
77303 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
77304 index ffd2025..df062c9 100644
77305 --- a/sound/aoa/codecs/onyx.h
77306 +++ b/sound/aoa/codecs/onyx.h
77307 @@ -11,6 +11,7 @@
77308 #include <linux/i2c.h>
77309 #include <asm/pmac_low_i2c.h>
77310 #include <asm/prom.h>
77311 +#include <asm/local.h>
77312
77313 /* PCM3052 register definitions */
77314
77315 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
77316 index 3cc4b86..af0a951 100644
77317 --- a/sound/core/oss/pcm_oss.c
77318 +++ b/sound/core/oss/pcm_oss.c
77319 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
77320 if (in_kernel) {
77321 mm_segment_t fs;
77322 fs = snd_enter_user();
77323 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
77324 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
77325 snd_leave_user(fs);
77326 } else {
77327 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
77328 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
77329 }
77330 if (ret != -EPIPE && ret != -ESTRPIPE)
77331 break;
77332 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
77333 if (in_kernel) {
77334 mm_segment_t fs;
77335 fs = snd_enter_user();
77336 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
77337 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
77338 snd_leave_user(fs);
77339 } else {
77340 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
77341 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
77342 }
77343 if (ret == -EPIPE) {
77344 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
77345 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
77346 struct snd_pcm_plugin_channel *channels;
77347 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
77348 if (!in_kernel) {
77349 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
77350 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
77351 return -EFAULT;
77352 buf = runtime->oss.buffer;
77353 }
77354 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
77355 }
77356 } else {
77357 tmp = snd_pcm_oss_write2(substream,
77358 - (const char __force *)buf,
77359 + (const char __force_kernel *)buf,
77360 runtime->oss.period_bytes, 0);
77361 if (tmp <= 0)
77362 goto err;
77363 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
77364 struct snd_pcm_runtime *runtime = substream->runtime;
77365 snd_pcm_sframes_t frames, frames1;
77366 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
77367 - char __user *final_dst = (char __force __user *)buf;
77368 + char __user *final_dst = (char __force_user *)buf;
77369 if (runtime->oss.plugin_first) {
77370 struct snd_pcm_plugin_channel *channels;
77371 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
77372 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
77373 xfer += tmp;
77374 runtime->oss.buffer_used -= tmp;
77375 } else {
77376 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
77377 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
77378 runtime->oss.period_bytes, 0);
77379 if (tmp <= 0)
77380 goto err;
77381 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
77382 size1);
77383 size1 /= runtime->channels; /* frames */
77384 fs = snd_enter_user();
77385 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
77386 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
77387 snd_leave_user(fs);
77388 }
77389 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
77390 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
77391 index 91cdf94..4085161 100644
77392 --- a/sound/core/pcm_compat.c
77393 +++ b/sound/core/pcm_compat.c
77394 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
77395 int err;
77396
77397 fs = snd_enter_user();
77398 - err = snd_pcm_delay(substream, &delay);
77399 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
77400 snd_leave_user(fs);
77401 if (err < 0)
77402 return err;
77403 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
77404 index 25ed9fe..24c46e9 100644
77405 --- a/sound/core/pcm_native.c
77406 +++ b/sound/core/pcm_native.c
77407 @@ -2765,11 +2765,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
77408 switch (substream->stream) {
77409 case SNDRV_PCM_STREAM_PLAYBACK:
77410 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
77411 - (void __user *)arg);
77412 + (void __force_user *)arg);
77413 break;
77414 case SNDRV_PCM_STREAM_CAPTURE:
77415 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
77416 - (void __user *)arg);
77417 + (void __force_user *)arg);
77418 break;
77419 default:
77420 result = -EINVAL;
77421 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
77422 index 5cf8d65..912a79c 100644
77423 --- a/sound/core/seq/seq_device.c
77424 +++ b/sound/core/seq/seq_device.c
77425 @@ -64,7 +64,7 @@ struct ops_list {
77426 int argsize; /* argument size */
77427
77428 /* operators */
77429 - struct snd_seq_dev_ops ops;
77430 + struct snd_seq_dev_ops *ops;
77431
77432 /* registred devices */
77433 struct list_head dev_list; /* list of devices */
77434 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
77435
77436 mutex_lock(&ops->reg_mutex);
77437 /* copy driver operators */
77438 - ops->ops = *entry;
77439 + ops->ops = entry;
77440 ops->driver |= DRIVER_LOADED;
77441 ops->argsize = argsize;
77442
77443 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
77444 dev->name, ops->id, ops->argsize, dev->argsize);
77445 return -EINVAL;
77446 }
77447 - if (ops->ops.init_device(dev) >= 0) {
77448 + if (ops->ops->init_device(dev) >= 0) {
77449 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
77450 ops->num_init_devices++;
77451 } else {
77452 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
77453 dev->name, ops->id, ops->argsize, dev->argsize);
77454 return -EINVAL;
77455 }
77456 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
77457 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
77458 dev->status = SNDRV_SEQ_DEVICE_FREE;
77459 dev->driver_data = NULL;
77460 ops->num_init_devices--;
77461 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
77462 index f24bf9a..1f7b67c 100644
77463 --- a/sound/drivers/mts64.c
77464 +++ b/sound/drivers/mts64.c
77465 @@ -29,6 +29,7 @@
77466 #include <sound/initval.h>
77467 #include <sound/rawmidi.h>
77468 #include <sound/control.h>
77469 +#include <asm/local.h>
77470
77471 #define CARD_NAME "Miditerminal 4140"
77472 #define DRIVER_NAME "MTS64"
77473 @@ -67,7 +68,7 @@ struct mts64 {
77474 struct pardevice *pardev;
77475 int pardev_claimed;
77476
77477 - int open_count;
77478 + local_t open_count;
77479 int current_midi_output_port;
77480 int current_midi_input_port;
77481 u8 mode[MTS64_NUM_INPUT_PORTS];
77482 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
77483 {
77484 struct mts64 *mts = substream->rmidi->private_data;
77485
77486 - if (mts->open_count == 0) {
77487 + if (local_read(&mts->open_count) == 0) {
77488 /* We don't need a spinlock here, because this is just called
77489 if the device has not been opened before.
77490 So there aren't any IRQs from the device */
77491 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
77492
77493 msleep(50);
77494 }
77495 - ++(mts->open_count);
77496 + local_inc(&mts->open_count);
77497
77498 return 0;
77499 }
77500 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
77501 struct mts64 *mts = substream->rmidi->private_data;
77502 unsigned long flags;
77503
77504 - --(mts->open_count);
77505 - if (mts->open_count == 0) {
77506 + if (local_dec_return(&mts->open_count) == 0) {
77507 /* We need the spinlock_irqsave here because we can still
77508 have IRQs at this point */
77509 spin_lock_irqsave(&mts->lock, flags);
77510 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
77511
77512 msleep(500);
77513
77514 - } else if (mts->open_count < 0)
77515 - mts->open_count = 0;
77516 + } else if (local_read(&mts->open_count) < 0)
77517 + local_set(&mts->open_count, 0);
77518
77519 return 0;
77520 }
77521 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
77522 index b953fb4..1999c01 100644
77523 --- a/sound/drivers/opl4/opl4_lib.c
77524 +++ b/sound/drivers/opl4/opl4_lib.c
77525 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
77526 MODULE_DESCRIPTION("OPL4 driver");
77527 MODULE_LICENSE("GPL");
77528
77529 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
77530 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
77531 {
77532 int timeout = 10;
77533 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
77534 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
77535 index f664823..590c745 100644
77536 --- a/sound/drivers/portman2x4.c
77537 +++ b/sound/drivers/portman2x4.c
77538 @@ -48,6 +48,7 @@
77539 #include <sound/initval.h>
77540 #include <sound/rawmidi.h>
77541 #include <sound/control.h>
77542 +#include <asm/local.h>
77543
77544 #define CARD_NAME "Portman 2x4"
77545 #define DRIVER_NAME "portman"
77546 @@ -85,7 +86,7 @@ struct portman {
77547 struct pardevice *pardev;
77548 int pardev_claimed;
77549
77550 - int open_count;
77551 + local_t open_count;
77552 int mode[PORTMAN_NUM_INPUT_PORTS];
77553 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
77554 };
77555 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
77556 index 87657dd..a8268d4 100644
77557 --- a/sound/firewire/amdtp.c
77558 +++ b/sound/firewire/amdtp.c
77559 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
77560 ptr = s->pcm_buffer_pointer + data_blocks;
77561 if (ptr >= pcm->runtime->buffer_size)
77562 ptr -= pcm->runtime->buffer_size;
77563 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
77564 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
77565
77566 s->pcm_period_pointer += data_blocks;
77567 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
77568 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
77569 */
77570 void amdtp_out_stream_update(struct amdtp_out_stream *s)
77571 {
77572 - ACCESS_ONCE(s->source_node_id_field) =
77573 + ACCESS_ONCE_RW(s->source_node_id_field) =
77574 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
77575 }
77576 EXPORT_SYMBOL(amdtp_out_stream_update);
77577 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
77578 index 537a9cb..8e8c8e9 100644
77579 --- a/sound/firewire/amdtp.h
77580 +++ b/sound/firewire/amdtp.h
77581 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
77582 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
77583 struct snd_pcm_substream *pcm)
77584 {
77585 - ACCESS_ONCE(s->pcm) = pcm;
77586 + ACCESS_ONCE_RW(s->pcm) = pcm;
77587 }
77588
77589 /**
77590 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
77591 index cd094ec..eca1277 100644
77592 --- a/sound/firewire/isight.c
77593 +++ b/sound/firewire/isight.c
77594 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
77595 ptr += count;
77596 if (ptr >= runtime->buffer_size)
77597 ptr -= runtime->buffer_size;
77598 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
77599 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
77600
77601 isight->period_counter += count;
77602 if (isight->period_counter >= runtime->period_size) {
77603 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
77604 if (err < 0)
77605 return err;
77606
77607 - ACCESS_ONCE(isight->pcm_active) = true;
77608 + ACCESS_ONCE_RW(isight->pcm_active) = true;
77609
77610 return 0;
77611 }
77612 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
77613 {
77614 struct isight *isight = substream->private_data;
77615
77616 - ACCESS_ONCE(isight->pcm_active) = false;
77617 + ACCESS_ONCE_RW(isight->pcm_active) = false;
77618
77619 mutex_lock(&isight->mutex);
77620 isight_stop_streaming(isight);
77621 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
77622
77623 switch (cmd) {
77624 case SNDRV_PCM_TRIGGER_START:
77625 - ACCESS_ONCE(isight->pcm_running) = true;
77626 + ACCESS_ONCE_RW(isight->pcm_running) = true;
77627 break;
77628 case SNDRV_PCM_TRIGGER_STOP:
77629 - ACCESS_ONCE(isight->pcm_running) = false;
77630 + ACCESS_ONCE_RW(isight->pcm_running) = false;
77631 break;
77632 default:
77633 return -EINVAL;
77634 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
77635 index c94578d..0794ac1 100644
77636 --- a/sound/isa/cmi8330.c
77637 +++ b/sound/isa/cmi8330.c
77638 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
77639
77640 struct snd_pcm *pcm;
77641 struct snd_cmi8330_stream {
77642 - struct snd_pcm_ops ops;
77643 + snd_pcm_ops_no_const ops;
77644 snd_pcm_open_callback_t open;
77645 void *private_data; /* sb or wss */
77646 } streams[2];
77647 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
77648 index 733b014..56ce96f 100644
77649 --- a/sound/oss/sb_audio.c
77650 +++ b/sound/oss/sb_audio.c
77651 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
77652 buf16 = (signed short *)(localbuf + localoffs);
77653 while (c)
77654 {
77655 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77656 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77657 if (copy_from_user(lbuf8,
77658 userbuf+useroffs + p,
77659 locallen))
77660 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
77661 index 09d4648..cf234c7 100644
77662 --- a/sound/oss/swarm_cs4297a.c
77663 +++ b/sound/oss/swarm_cs4297a.c
77664 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
77665 {
77666 struct cs4297a_state *s;
77667 u32 pwr, id;
77668 - mm_segment_t fs;
77669 int rval;
77670 #ifndef CONFIG_BCM_CS4297A_CSWARM
77671 u64 cfg;
77672 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
77673 if (!rval) {
77674 char *sb1250_duart_present;
77675
77676 +#if 0
77677 + mm_segment_t fs;
77678 fs = get_fs();
77679 set_fs(KERNEL_DS);
77680 -#if 0
77681 val = SOUND_MASK_LINE;
77682 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
77683 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
77684 val = initvol[i].vol;
77685 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
77686 }
77687 + set_fs(fs);
77688 // cs4297a_write_ac97(s, 0x18, 0x0808);
77689 #else
77690 // cs4297a_write_ac97(s, 0x5e, 0x180);
77691 cs4297a_write_ac97(s, 0x02, 0x0808);
77692 cs4297a_write_ac97(s, 0x18, 0x0808);
77693 #endif
77694 - set_fs(fs);
77695
77696 list_add(&s->list, &cs4297a_devs);
77697
77698 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
77699 index 5644711..a2aebc1 100644
77700 --- a/sound/pci/hda/hda_codec.h
77701 +++ b/sound/pci/hda/hda_codec.h
77702 @@ -611,7 +611,7 @@ struct hda_bus_ops {
77703 /* notify power-up/down from codec to controller */
77704 void (*pm_notify)(struct hda_bus *bus);
77705 #endif
77706 -};
77707 +} __no_const;
77708
77709 /* template to pass to the bus constructor */
77710 struct hda_bus_template {
77711 @@ -713,6 +713,7 @@ struct hda_codec_ops {
77712 #endif
77713 void (*reboot_notify)(struct hda_codec *codec);
77714 };
77715 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
77716
77717 /* record for amp information cache */
77718 struct hda_cache_head {
77719 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
77720 struct snd_pcm_substream *substream);
77721 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
77722 struct snd_pcm_substream *substream);
77723 -};
77724 +} __no_const;
77725
77726 /* PCM information for each substream */
77727 struct hda_pcm_stream {
77728 @@ -801,7 +802,7 @@ struct hda_codec {
77729 const char *modelname; /* model name for preset */
77730
77731 /* set by patch */
77732 - struct hda_codec_ops patch_ops;
77733 + hda_codec_ops_no_const patch_ops;
77734
77735 /* PCM to create, set by patch_ops.build_pcms callback */
77736 unsigned int num_pcms;
77737 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
77738 index 0da778a..bc38b84 100644
77739 --- a/sound/pci/ice1712/ice1712.h
77740 +++ b/sound/pci/ice1712/ice1712.h
77741 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
77742 unsigned int mask_flags; /* total mask bits */
77743 struct snd_akm4xxx_ops {
77744 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
77745 - } ops;
77746 + } __no_const ops;
77747 };
77748
77749 struct snd_ice1712_spdif {
77750 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
77751 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77752 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77753 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77754 - } ops;
77755 + } __no_const ops;
77756 };
77757
77758
77759 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
77760 index 03ee4e3..be86b46 100644
77761 --- a/sound/pci/ymfpci/ymfpci_main.c
77762 +++ b/sound/pci/ymfpci/ymfpci_main.c
77763 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
77764 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
77765 break;
77766 }
77767 - if (atomic_read(&chip->interrupt_sleep_count)) {
77768 - atomic_set(&chip->interrupt_sleep_count, 0);
77769 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77770 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77771 wake_up(&chip->interrupt_sleep);
77772 }
77773 __end:
77774 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
77775 continue;
77776 init_waitqueue_entry(&wait, current);
77777 add_wait_queue(&chip->interrupt_sleep, &wait);
77778 - atomic_inc(&chip->interrupt_sleep_count);
77779 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
77780 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
77781 remove_wait_queue(&chip->interrupt_sleep, &wait);
77782 }
77783 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
77784 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
77785 spin_unlock(&chip->reg_lock);
77786
77787 - if (atomic_read(&chip->interrupt_sleep_count)) {
77788 - atomic_set(&chip->interrupt_sleep_count, 0);
77789 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77790 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77791 wake_up(&chip->interrupt_sleep);
77792 }
77793 }
77794 @@ -2382,7 +2382,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
77795 spin_lock_init(&chip->reg_lock);
77796 spin_lock_init(&chip->voice_lock);
77797 init_waitqueue_head(&chip->interrupt_sleep);
77798 - atomic_set(&chip->interrupt_sleep_count, 0);
77799 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77800 chip->card = card;
77801 chip->pci = pci;
77802 chip->irq = -1;
77803 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
77804 index ee15337..e2187a6 100644
77805 --- a/sound/soc/soc-pcm.c
77806 +++ b/sound/soc/soc-pcm.c
77807 @@ -583,7 +583,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
77808 }
77809
77810 /* ASoC PCM operations */
77811 -static struct snd_pcm_ops soc_pcm_ops = {
77812 +static snd_pcm_ops_no_const soc_pcm_ops = {
77813 .open = soc_pcm_open,
77814 .close = soc_pcm_close,
77815 .hw_params = soc_pcm_hw_params,
77816 diff --git a/sound/usb/card.h b/sound/usb/card.h
77817 index a39edcc..1014050 100644
77818 --- a/sound/usb/card.h
77819 +++ b/sound/usb/card.h
77820 @@ -44,6 +44,7 @@ struct snd_urb_ops {
77821 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77822 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77823 };
77824 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
77825
77826 struct snd_usb_substream {
77827 struct snd_usb_stream *stream;
77828 @@ -93,7 +94,7 @@ struct snd_usb_substream {
77829 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
77830 spinlock_t lock;
77831
77832 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
77833 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
77834 int last_frame_number; /* stored frame number */
77835 int last_delay; /* stored delay */
77836 };
77837 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
77838 new file mode 100644
77839 index 0000000..469b06a
77840 --- /dev/null
77841 +++ b/tools/gcc/Makefile
77842 @@ -0,0 +1,21 @@
77843 +#CC := gcc
77844 +#PLUGIN_SOURCE_FILES := pax_plugin.c
77845 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
77846 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
77847 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
77848 +
77849 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
77850 +
77851 +hostlibs-y := constify_plugin.so
77852 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
77853 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
77854 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
77855 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
77856 +
77857 +always := $(hostlibs-y)
77858 +
77859 +constify_plugin-objs := constify_plugin.o
77860 +stackleak_plugin-objs := stackleak_plugin.o
77861 +kallocstat_plugin-objs := kallocstat_plugin.o
77862 +kernexec_plugin-objs := kernexec_plugin.o
77863 +checker_plugin-objs := checker_plugin.o
77864 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
77865 new file mode 100644
77866 index 0000000..d41b5af
77867 --- /dev/null
77868 +++ b/tools/gcc/checker_plugin.c
77869 @@ -0,0 +1,171 @@
77870 +/*
77871 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77872 + * Licensed under the GPL v2
77873 + *
77874 + * Note: the choice of the license means that the compilation process is
77875 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77876 + * but for the kernel it doesn't matter since it doesn't link against
77877 + * any of the gcc libraries
77878 + *
77879 + * gcc plugin to implement various sparse (source code checker) features
77880 + *
77881 + * TODO:
77882 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
77883 + *
77884 + * BUGS:
77885 + * - none known
77886 + */
77887 +#include "gcc-plugin.h"
77888 +#include "config.h"
77889 +#include "system.h"
77890 +#include "coretypes.h"
77891 +#include "tree.h"
77892 +#include "tree-pass.h"
77893 +#include "flags.h"
77894 +#include "intl.h"
77895 +#include "toplev.h"
77896 +#include "plugin.h"
77897 +//#include "expr.h" where are you...
77898 +#include "diagnostic.h"
77899 +#include "plugin-version.h"
77900 +#include "tm.h"
77901 +#include "function.h"
77902 +#include "basic-block.h"
77903 +#include "gimple.h"
77904 +#include "rtl.h"
77905 +#include "emit-rtl.h"
77906 +#include "tree-flow.h"
77907 +#include "target.h"
77908 +
77909 +extern void c_register_addr_space (const char *str, addr_space_t as);
77910 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
77911 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
77912 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
77913 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
77914 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
77915 +
77916 +extern void print_gimple_stmt(FILE *, gimple, int, int);
77917 +extern rtx emit_move_insn(rtx x, rtx y);
77918 +
77919 +int plugin_is_GPL_compatible;
77920 +
77921 +static struct plugin_info checker_plugin_info = {
77922 + .version = "201111150100",
77923 +};
77924 +
77925 +#define ADDR_SPACE_KERNEL 0
77926 +#define ADDR_SPACE_FORCE_KERNEL 1
77927 +#define ADDR_SPACE_USER 2
77928 +#define ADDR_SPACE_FORCE_USER 3
77929 +#define ADDR_SPACE_IOMEM 0
77930 +#define ADDR_SPACE_FORCE_IOMEM 0
77931 +#define ADDR_SPACE_PERCPU 0
77932 +#define ADDR_SPACE_FORCE_PERCPU 0
77933 +#define ADDR_SPACE_RCU 0
77934 +#define ADDR_SPACE_FORCE_RCU 0
77935 +
77936 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
77937 +{
77938 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
77939 +}
77940 +
77941 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
77942 +{
77943 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
77944 +}
77945 +
77946 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
77947 +{
77948 + return default_addr_space_valid_pointer_mode(mode, as);
77949 +}
77950 +
77951 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
77952 +{
77953 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
77954 +}
77955 +
77956 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
77957 +{
77958 + return default_addr_space_legitimize_address(x, oldx, mode, as);
77959 +}
77960 +
77961 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
77962 +{
77963 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
77964 + return true;
77965 +
77966 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
77967 + return true;
77968 +
77969 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
77970 + return true;
77971 +
77972 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
77973 + return true;
77974 +
77975 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
77976 + return true;
77977 +
77978 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
77979 + return true;
77980 +
77981 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
77982 + return true;
77983 +
77984 + return subset == superset;
77985 +}
77986 +
77987 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
77988 +{
77989 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
77990 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
77991 +
77992 + return op;
77993 +}
77994 +
77995 +static void register_checker_address_spaces(void *event_data, void *data)
77996 +{
77997 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
77998 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
77999 + c_register_addr_space("__user", ADDR_SPACE_USER);
78000 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
78001 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
78002 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
78003 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
78004 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
78005 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
78006 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
78007 +
78008 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
78009 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
78010 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
78011 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
78012 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
78013 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
78014 + targetm.addr_space.convert = checker_addr_space_convert;
78015 +}
78016 +
78017 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78018 +{
78019 + const char * const plugin_name = plugin_info->base_name;
78020 + const int argc = plugin_info->argc;
78021 + const struct plugin_argument * const argv = plugin_info->argv;
78022 + int i;
78023 +
78024 + if (!plugin_default_version_check(version, &gcc_version)) {
78025 + error(G_("incompatible gcc/plugin versions"));
78026 + return 1;
78027 + }
78028 +
78029 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
78030 +
78031 + for (i = 0; i < argc; ++i)
78032 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78033 +
78034 + if (TARGET_64BIT == 0)
78035 + return 0;
78036 +
78037 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
78038 +
78039 + return 0;
78040 +}
78041 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
78042 new file mode 100644
78043 index 0000000..704a564
78044 --- /dev/null
78045 +++ b/tools/gcc/constify_plugin.c
78046 @@ -0,0 +1,303 @@
78047 +/*
78048 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
78049 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
78050 + * Licensed under the GPL v2, or (at your option) v3
78051 + *
78052 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
78053 + *
78054 + * Homepage:
78055 + * http://www.grsecurity.net/~ephox/const_plugin/
78056 + *
78057 + * Usage:
78058 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
78059 + * $ gcc -fplugin=constify_plugin.so test.c -O2
78060 + */
78061 +
78062 +#include "gcc-plugin.h"
78063 +#include "config.h"
78064 +#include "system.h"
78065 +#include "coretypes.h"
78066 +#include "tree.h"
78067 +#include "tree-pass.h"
78068 +#include "flags.h"
78069 +#include "intl.h"
78070 +#include "toplev.h"
78071 +#include "plugin.h"
78072 +#include "diagnostic.h"
78073 +#include "plugin-version.h"
78074 +#include "tm.h"
78075 +#include "function.h"
78076 +#include "basic-block.h"
78077 +#include "gimple.h"
78078 +#include "rtl.h"
78079 +#include "emit-rtl.h"
78080 +#include "tree-flow.h"
78081 +
78082 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
78083 +
78084 +int plugin_is_GPL_compatible;
78085 +
78086 +static struct plugin_info const_plugin_info = {
78087 + .version = "201111150100",
78088 + .help = "no-constify\tturn off constification\n",
78089 +};
78090 +
78091 +static void constify_type(tree type);
78092 +static bool walk_struct(tree node);
78093 +
78094 +static tree deconstify_type(tree old_type)
78095 +{
78096 + tree new_type, field;
78097 +
78098 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
78099 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
78100 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
78101 + DECL_FIELD_CONTEXT(field) = new_type;
78102 + TYPE_READONLY(new_type) = 0;
78103 + C_TYPE_FIELDS_READONLY(new_type) = 0;
78104 + return new_type;
78105 +}
78106 +
78107 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
78108 +{
78109 + tree type;
78110 +
78111 + *no_add_attrs = true;
78112 + if (TREE_CODE(*node) == FUNCTION_DECL) {
78113 + error("%qE attribute does not apply to functions", name);
78114 + return NULL_TREE;
78115 + }
78116 +
78117 + if (TREE_CODE(*node) == VAR_DECL) {
78118 + error("%qE attribute does not apply to variables", name);
78119 + return NULL_TREE;
78120 + }
78121 +
78122 + if (TYPE_P(*node)) {
78123 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
78124 + *no_add_attrs = false;
78125 + else
78126 + error("%qE attribute applies to struct and union types only", name);
78127 + return NULL_TREE;
78128 + }
78129 +
78130 + type = TREE_TYPE(*node);
78131 +
78132 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
78133 + error("%qE attribute applies to struct and union types only", name);
78134 + return NULL_TREE;
78135 + }
78136 +
78137 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
78138 + error("%qE attribute is already applied to the type", name);
78139 + return NULL_TREE;
78140 + }
78141 +
78142 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
78143 + error("%qE attribute used on type that is not constified", name);
78144 + return NULL_TREE;
78145 + }
78146 +
78147 + if (TREE_CODE(*node) == TYPE_DECL) {
78148 + TREE_TYPE(*node) = deconstify_type(type);
78149 + TREE_READONLY(*node) = 0;
78150 + return NULL_TREE;
78151 + }
78152 +
78153 + return NULL_TREE;
78154 +}
78155 +
78156 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
78157 +{
78158 + *no_add_attrs = true;
78159 + if (!TYPE_P(*node)) {
78160 + error("%qE attribute applies to types only", name);
78161 + return NULL_TREE;
78162 + }
78163 +
78164 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
78165 + error("%qE attribute applies to struct and union types only", name);
78166 + return NULL_TREE;
78167 + }
78168 +
78169 + *no_add_attrs = false;
78170 + constify_type(*node);
78171 + return NULL_TREE;
78172 +}
78173 +
78174 +static struct attribute_spec no_const_attr = {
78175 + .name = "no_const",
78176 + .min_length = 0,
78177 + .max_length = 0,
78178 + .decl_required = false,
78179 + .type_required = false,
78180 + .function_type_required = false,
78181 + .handler = handle_no_const_attribute,
78182 +#if BUILDING_GCC_VERSION >= 4007
78183 + .affects_type_identity = true
78184 +#endif
78185 +};
78186 +
78187 +static struct attribute_spec do_const_attr = {
78188 + .name = "do_const",
78189 + .min_length = 0,
78190 + .max_length = 0,
78191 + .decl_required = false,
78192 + .type_required = false,
78193 + .function_type_required = false,
78194 + .handler = handle_do_const_attribute,
78195 +#if BUILDING_GCC_VERSION >= 4007
78196 + .affects_type_identity = true
78197 +#endif
78198 +};
78199 +
78200 +static void register_attributes(void *event_data, void *data)
78201 +{
78202 + register_attribute(&no_const_attr);
78203 + register_attribute(&do_const_attr);
78204 +}
78205 +
78206 +static void constify_type(tree type)
78207 +{
78208 + TYPE_READONLY(type) = 1;
78209 + C_TYPE_FIELDS_READONLY(type) = 1;
78210 +}
78211 +
78212 +static bool is_fptr(tree field)
78213 +{
78214 + tree ptr = TREE_TYPE(field);
78215 +
78216 + if (TREE_CODE(ptr) != POINTER_TYPE)
78217 + return false;
78218 +
78219 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
78220 +}
78221 +
78222 +static bool walk_struct(tree node)
78223 +{
78224 + tree field;
78225 +
78226 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
78227 + return false;
78228 +
78229 + if (TYPE_FIELDS(node) == NULL_TREE)
78230 + return false;
78231 +
78232 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
78233 + tree type = TREE_TYPE(field);
78234 + enum tree_code code = TREE_CODE(type);
78235 + if (code == RECORD_TYPE || code == UNION_TYPE) {
78236 + if (!(walk_struct(type)))
78237 + return false;
78238 + } else if (!is_fptr(field) && !TREE_READONLY(field))
78239 + return false;
78240 + }
78241 + return true;
78242 +}
78243 +
78244 +static void finish_type(void *event_data, void *data)
78245 +{
78246 + tree type = (tree)event_data;
78247 +
78248 + if (type == NULL_TREE)
78249 + return;
78250 +
78251 + if (TYPE_READONLY(type))
78252 + return;
78253 +
78254 + if (walk_struct(type))
78255 + constify_type(type);
78256 +}
78257 +
78258 +static unsigned int check_local_variables(void);
78259 +
78260 +struct gimple_opt_pass pass_local_variable = {
78261 + {
78262 + .type = GIMPLE_PASS,
78263 + .name = "check_local_variables",
78264 + .gate = NULL,
78265 + .execute = check_local_variables,
78266 + .sub = NULL,
78267 + .next = NULL,
78268 + .static_pass_number = 0,
78269 + .tv_id = TV_NONE,
78270 + .properties_required = 0,
78271 + .properties_provided = 0,
78272 + .properties_destroyed = 0,
78273 + .todo_flags_start = 0,
78274 + .todo_flags_finish = 0
78275 + }
78276 +};
78277 +
78278 +static unsigned int check_local_variables(void)
78279 +{
78280 + tree var;
78281 + referenced_var_iterator rvi;
78282 +
78283 +#if BUILDING_GCC_VERSION == 4005
78284 + FOR_EACH_REFERENCED_VAR(var, rvi) {
78285 +#else
78286 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
78287 +#endif
78288 + tree type = TREE_TYPE(var);
78289 +
78290 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
78291 + continue;
78292 +
78293 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
78294 + continue;
78295 +
78296 + if (!TYPE_READONLY(type))
78297 + continue;
78298 +
78299 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
78300 +// continue;
78301 +
78302 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
78303 +// continue;
78304 +
78305 + if (walk_struct(type)) {
78306 + error("constified variable %qE cannot be local", var);
78307 + return 1;
78308 + }
78309 + }
78310 + return 0;
78311 +}
78312 +
78313 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78314 +{
78315 + const char * const plugin_name = plugin_info->base_name;
78316 + const int argc = plugin_info->argc;
78317 + const struct plugin_argument * const argv = plugin_info->argv;
78318 + int i;
78319 + bool constify = true;
78320 +
78321 + struct register_pass_info local_variable_pass_info = {
78322 + .pass = &pass_local_variable.pass,
78323 + .reference_pass_name = "*referenced_vars",
78324 + .ref_pass_instance_number = 0,
78325 + .pos_op = PASS_POS_INSERT_AFTER
78326 + };
78327 +
78328 + if (!plugin_default_version_check(version, &gcc_version)) {
78329 + error(G_("incompatible gcc/plugin versions"));
78330 + return 1;
78331 + }
78332 +
78333 + for (i = 0; i < argc; ++i) {
78334 + if (!(strcmp(argv[i].key, "no-constify"))) {
78335 + constify = false;
78336 + continue;
78337 + }
78338 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78339 + }
78340 +
78341 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
78342 + if (constify) {
78343 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
78344 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
78345 + }
78346 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
78347 +
78348 + return 0;
78349 +}
78350 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
78351 new file mode 100644
78352 index 0000000..a5eabce
78353 --- /dev/null
78354 +++ b/tools/gcc/kallocstat_plugin.c
78355 @@ -0,0 +1,167 @@
78356 +/*
78357 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78358 + * Licensed under the GPL v2
78359 + *
78360 + * Note: the choice of the license means that the compilation process is
78361 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78362 + * but for the kernel it doesn't matter since it doesn't link against
78363 + * any of the gcc libraries
78364 + *
78365 + * gcc plugin to find the distribution of k*alloc sizes
78366 + *
78367 + * TODO:
78368 + *
78369 + * BUGS:
78370 + * - none known
78371 + */
78372 +#include "gcc-plugin.h"
78373 +#include "config.h"
78374 +#include "system.h"
78375 +#include "coretypes.h"
78376 +#include "tree.h"
78377 +#include "tree-pass.h"
78378 +#include "flags.h"
78379 +#include "intl.h"
78380 +#include "toplev.h"
78381 +#include "plugin.h"
78382 +//#include "expr.h" where are you...
78383 +#include "diagnostic.h"
78384 +#include "plugin-version.h"
78385 +#include "tm.h"
78386 +#include "function.h"
78387 +#include "basic-block.h"
78388 +#include "gimple.h"
78389 +#include "rtl.h"
78390 +#include "emit-rtl.h"
78391 +
78392 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78393 +
78394 +int plugin_is_GPL_compatible;
78395 +
78396 +static const char * const kalloc_functions[] = {
78397 + "__kmalloc",
78398 + "kmalloc",
78399 + "kmalloc_large",
78400 + "kmalloc_node",
78401 + "kmalloc_order",
78402 + "kmalloc_order_trace",
78403 + "kmalloc_slab",
78404 + "kzalloc",
78405 + "kzalloc_node",
78406 +};
78407 +
78408 +static struct plugin_info kallocstat_plugin_info = {
78409 + .version = "201111150100",
78410 +};
78411 +
78412 +static unsigned int execute_kallocstat(void);
78413 +
78414 +static struct gimple_opt_pass kallocstat_pass = {
78415 + .pass = {
78416 + .type = GIMPLE_PASS,
78417 + .name = "kallocstat",
78418 + .gate = NULL,
78419 + .execute = execute_kallocstat,
78420 + .sub = NULL,
78421 + .next = NULL,
78422 + .static_pass_number = 0,
78423 + .tv_id = TV_NONE,
78424 + .properties_required = 0,
78425 + .properties_provided = 0,
78426 + .properties_destroyed = 0,
78427 + .todo_flags_start = 0,
78428 + .todo_flags_finish = 0
78429 + }
78430 +};
78431 +
78432 +static bool is_kalloc(const char *fnname)
78433 +{
78434 + size_t i;
78435 +
78436 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
78437 + if (!strcmp(fnname, kalloc_functions[i]))
78438 + return true;
78439 + return false;
78440 +}
78441 +
78442 +static unsigned int execute_kallocstat(void)
78443 +{
78444 + basic_block bb;
78445 +
78446 + // 1. loop through BBs and GIMPLE statements
78447 + FOR_EACH_BB(bb) {
78448 + gimple_stmt_iterator gsi;
78449 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78450 + // gimple match:
78451 + tree fndecl, size;
78452 + gimple call_stmt;
78453 + const char *fnname;
78454 +
78455 + // is it a call
78456 + call_stmt = gsi_stmt(gsi);
78457 + if (!is_gimple_call(call_stmt))
78458 + continue;
78459 + fndecl = gimple_call_fndecl(call_stmt);
78460 + if (fndecl == NULL_TREE)
78461 + continue;
78462 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
78463 + continue;
78464 +
78465 + // is it a call to k*alloc
78466 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
78467 + if (!is_kalloc(fnname))
78468 + continue;
78469 +
78470 + // is the size arg the result of a simple const assignment
78471 + size = gimple_call_arg(call_stmt, 0);
78472 + while (true) {
78473 + gimple def_stmt;
78474 + expanded_location xloc;
78475 + size_t size_val;
78476 +
78477 + if (TREE_CODE(size) != SSA_NAME)
78478 + break;
78479 + def_stmt = SSA_NAME_DEF_STMT(size);
78480 + if (!def_stmt || !is_gimple_assign(def_stmt))
78481 + break;
78482 + if (gimple_num_ops(def_stmt) != 2)
78483 + break;
78484 + size = gimple_assign_rhs1(def_stmt);
78485 + if (!TREE_CONSTANT(size))
78486 + continue;
78487 + xloc = expand_location(gimple_location(def_stmt));
78488 + if (!xloc.file)
78489 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
78490 + size_val = TREE_INT_CST_LOW(size);
78491 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
78492 + break;
78493 + }
78494 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
78495 +//debug_tree(gimple_call_fn(call_stmt));
78496 +//print_node(stderr, "pax", fndecl, 4);
78497 + }
78498 + }
78499 +
78500 + return 0;
78501 +}
78502 +
78503 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78504 +{
78505 + const char * const plugin_name = plugin_info->base_name;
78506 + struct register_pass_info kallocstat_pass_info = {
78507 + .pass = &kallocstat_pass.pass,
78508 + .reference_pass_name = "ssa",
78509 + .ref_pass_instance_number = 0,
78510 + .pos_op = PASS_POS_INSERT_AFTER
78511 + };
78512 +
78513 + if (!plugin_default_version_check(version, &gcc_version)) {
78514 + error(G_("incompatible gcc/plugin versions"));
78515 + return 1;
78516 + }
78517 +
78518 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
78519 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
78520 +
78521 + return 0;
78522 +}
78523 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
78524 new file mode 100644
78525 index 0000000..008f159
78526 --- /dev/null
78527 +++ b/tools/gcc/kernexec_plugin.c
78528 @@ -0,0 +1,427 @@
78529 +/*
78530 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78531 + * Licensed under the GPL v2
78532 + *
78533 + * Note: the choice of the license means that the compilation process is
78534 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78535 + * but for the kernel it doesn't matter since it doesn't link against
78536 + * any of the gcc libraries
78537 + *
78538 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
78539 + *
78540 + * TODO:
78541 + *
78542 + * BUGS:
78543 + * - none known
78544 + */
78545 +#include "gcc-plugin.h"
78546 +#include "config.h"
78547 +#include "system.h"
78548 +#include "coretypes.h"
78549 +#include "tree.h"
78550 +#include "tree-pass.h"
78551 +#include "flags.h"
78552 +#include "intl.h"
78553 +#include "toplev.h"
78554 +#include "plugin.h"
78555 +//#include "expr.h" where are you...
78556 +#include "diagnostic.h"
78557 +#include "plugin-version.h"
78558 +#include "tm.h"
78559 +#include "function.h"
78560 +#include "basic-block.h"
78561 +#include "gimple.h"
78562 +#include "rtl.h"
78563 +#include "emit-rtl.h"
78564 +#include "tree-flow.h"
78565 +
78566 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78567 +extern rtx emit_move_insn(rtx x, rtx y);
78568 +
78569 +int plugin_is_GPL_compatible;
78570 +
78571 +static struct plugin_info kernexec_plugin_info = {
78572 + .version = "201111291120",
78573 + .help = "method=[bts|or]\tinstrumentation method\n"
78574 +};
78575 +
78576 +static unsigned int execute_kernexec_reload(void);
78577 +static unsigned int execute_kernexec_fptr(void);
78578 +static unsigned int execute_kernexec_retaddr(void);
78579 +static bool kernexec_cmodel_check(void);
78580 +
78581 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
78582 +static void (*kernexec_instrument_retaddr)(rtx);
78583 +
78584 +static struct gimple_opt_pass kernexec_reload_pass = {
78585 + .pass = {
78586 + .type = GIMPLE_PASS,
78587 + .name = "kernexec_reload",
78588 + .gate = kernexec_cmodel_check,
78589 + .execute = execute_kernexec_reload,
78590 + .sub = NULL,
78591 + .next = NULL,
78592 + .static_pass_number = 0,
78593 + .tv_id = TV_NONE,
78594 + .properties_required = 0,
78595 + .properties_provided = 0,
78596 + .properties_destroyed = 0,
78597 + .todo_flags_start = 0,
78598 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
78599 + }
78600 +};
78601 +
78602 +static struct gimple_opt_pass kernexec_fptr_pass = {
78603 + .pass = {
78604 + .type = GIMPLE_PASS,
78605 + .name = "kernexec_fptr",
78606 + .gate = kernexec_cmodel_check,
78607 + .execute = execute_kernexec_fptr,
78608 + .sub = NULL,
78609 + .next = NULL,
78610 + .static_pass_number = 0,
78611 + .tv_id = TV_NONE,
78612 + .properties_required = 0,
78613 + .properties_provided = 0,
78614 + .properties_destroyed = 0,
78615 + .todo_flags_start = 0,
78616 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
78617 + }
78618 +};
78619 +
78620 +static struct rtl_opt_pass kernexec_retaddr_pass = {
78621 + .pass = {
78622 + .type = RTL_PASS,
78623 + .name = "kernexec_retaddr",
78624 + .gate = kernexec_cmodel_check,
78625 + .execute = execute_kernexec_retaddr,
78626 + .sub = NULL,
78627 + .next = NULL,
78628 + .static_pass_number = 0,
78629 + .tv_id = TV_NONE,
78630 + .properties_required = 0,
78631 + .properties_provided = 0,
78632 + .properties_destroyed = 0,
78633 + .todo_flags_start = 0,
78634 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
78635 + }
78636 +};
78637 +
78638 +static bool kernexec_cmodel_check(void)
78639 +{
78640 + tree section;
78641 +
78642 + if (ix86_cmodel != CM_KERNEL)
78643 + return false;
78644 +
78645 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
78646 + if (!section || !TREE_VALUE(section))
78647 + return true;
78648 +
78649 + section = TREE_VALUE(TREE_VALUE(section));
78650 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
78651 + return true;
78652 +
78653 + return false;
78654 +}
78655 +
78656 +/*
78657 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
78658 + */
78659 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
78660 +{
78661 + gimple asm_movabs_stmt;
78662 +
78663 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
78664 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
78665 + gimple_asm_set_volatile(asm_movabs_stmt, true);
78666 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
78667 + update_stmt(asm_movabs_stmt);
78668 +}
78669 +
78670 +/*
78671 + * find all asm() stmts that clobber r10 and add a reload of r10
78672 + */
78673 +static unsigned int execute_kernexec_reload(void)
78674 +{
78675 + basic_block bb;
78676 +
78677 + // 1. loop through BBs and GIMPLE statements
78678 + FOR_EACH_BB(bb) {
78679 + gimple_stmt_iterator gsi;
78680 +
78681 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78682 + // gimple match: __asm__ ("" : : : "r10");
78683 + gimple asm_stmt;
78684 + size_t nclobbers;
78685 +
78686 + // is it an asm ...
78687 + asm_stmt = gsi_stmt(gsi);
78688 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
78689 + continue;
78690 +
78691 + // ... clobbering r10
78692 + nclobbers = gimple_asm_nclobbers(asm_stmt);
78693 + while (nclobbers--) {
78694 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
78695 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
78696 + continue;
78697 + kernexec_reload_fptr_mask(&gsi);
78698 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
78699 + break;
78700 + }
78701 + }
78702 + }
78703 +
78704 + return 0;
78705 +}
78706 +
78707 +/*
78708 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
78709 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
78710 + */
78711 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
78712 +{
78713 + gimple assign_intptr, assign_new_fptr, call_stmt;
78714 + tree intptr, old_fptr, new_fptr, kernexec_mask;
78715 +
78716 + call_stmt = gsi_stmt(*gsi);
78717 + old_fptr = gimple_call_fn(call_stmt);
78718 +
78719 + // create temporary unsigned long variable used for bitops and cast fptr to it
78720 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
78721 + add_referenced_var(intptr);
78722 + mark_sym_for_renaming(intptr);
78723 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
78724 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
78725 + update_stmt(assign_intptr);
78726 +
78727 + // apply logical or to temporary unsigned long and bitmask
78728 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
78729 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
78730 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
78731 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
78732 + update_stmt(assign_intptr);
78733 +
78734 + // cast temporary unsigned long back to a temporary fptr variable
78735 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
78736 + add_referenced_var(new_fptr);
78737 + mark_sym_for_renaming(new_fptr);
78738 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
78739 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
78740 + update_stmt(assign_new_fptr);
78741 +
78742 + // replace call stmt fn with the new fptr
78743 + gimple_call_set_fn(call_stmt, new_fptr);
78744 + update_stmt(call_stmt);
78745 +}
78746 +
78747 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
78748 +{
78749 + gimple asm_or_stmt, call_stmt;
78750 + tree old_fptr, new_fptr, input, output;
78751 + VEC(tree, gc) *inputs = NULL;
78752 + VEC(tree, gc) *outputs = NULL;
78753 +
78754 + call_stmt = gsi_stmt(*gsi);
78755 + old_fptr = gimple_call_fn(call_stmt);
78756 +
78757 + // create temporary fptr variable
78758 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
78759 + add_referenced_var(new_fptr);
78760 + mark_sym_for_renaming(new_fptr);
78761 +
78762 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
78763 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
78764 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
78765 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
78766 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
78767 + VEC_safe_push(tree, gc, inputs, input);
78768 + VEC_safe_push(tree, gc, outputs, output);
78769 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
78770 + gimple_asm_set_volatile(asm_or_stmt, true);
78771 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
78772 + update_stmt(asm_or_stmt);
78773 +
78774 + // replace call stmt fn with the new fptr
78775 + gimple_call_set_fn(call_stmt, new_fptr);
78776 + update_stmt(call_stmt);
78777 +}
78778 +
78779 +/*
78780 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
78781 + */
78782 +static unsigned int execute_kernexec_fptr(void)
78783 +{
78784 + basic_block bb;
78785 +
78786 + // 1. loop through BBs and GIMPLE statements
78787 + FOR_EACH_BB(bb) {
78788 + gimple_stmt_iterator gsi;
78789 +
78790 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78791 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
78792 + tree fn;
78793 + gimple call_stmt;
78794 +
78795 + // is it a call ...
78796 + call_stmt = gsi_stmt(gsi);
78797 + if (!is_gimple_call(call_stmt))
78798 + continue;
78799 + fn = gimple_call_fn(call_stmt);
78800 + if (TREE_CODE(fn) == ADDR_EXPR)
78801 + continue;
78802 + if (TREE_CODE(fn) != SSA_NAME)
78803 + gcc_unreachable();
78804 +
78805 + // ... through a function pointer
78806 + fn = SSA_NAME_VAR(fn);
78807 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
78808 + continue;
78809 + fn = TREE_TYPE(fn);
78810 + if (TREE_CODE(fn) != POINTER_TYPE)
78811 + continue;
78812 + fn = TREE_TYPE(fn);
78813 + if (TREE_CODE(fn) != FUNCTION_TYPE)
78814 + continue;
78815 +
78816 + kernexec_instrument_fptr(&gsi);
78817 +
78818 +//debug_tree(gimple_call_fn(call_stmt));
78819 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
78820 + }
78821 + }
78822 +
78823 + return 0;
78824 +}
78825 +
78826 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
78827 +static void kernexec_instrument_retaddr_bts(rtx insn)
78828 +{
78829 + rtx btsq;
78830 + rtvec argvec, constraintvec, labelvec;
78831 + int line;
78832 +
78833 + // create asm volatile("btsq $63,(%%rsp)":::)
78834 + argvec = rtvec_alloc(0);
78835 + constraintvec = rtvec_alloc(0);
78836 + labelvec = rtvec_alloc(0);
78837 + line = expand_location(RTL_LOCATION(insn)).line;
78838 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
78839 + MEM_VOLATILE_P(btsq) = 1;
78840 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
78841 + emit_insn_before(btsq, insn);
78842 +}
78843 +
78844 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
78845 +static void kernexec_instrument_retaddr_or(rtx insn)
78846 +{
78847 + rtx orq;
78848 + rtvec argvec, constraintvec, labelvec;
78849 + int line;
78850 +
78851 + // create asm volatile("orq %%r10,(%%rsp)":::)
78852 + argvec = rtvec_alloc(0);
78853 + constraintvec = rtvec_alloc(0);
78854 + labelvec = rtvec_alloc(0);
78855 + line = expand_location(RTL_LOCATION(insn)).line;
78856 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
78857 + MEM_VOLATILE_P(orq) = 1;
78858 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
78859 + emit_insn_before(orq, insn);
78860 +}
78861 +
78862 +/*
78863 + * find all asm level function returns and forcibly set the highest bit of the return address
78864 + */
78865 +static unsigned int execute_kernexec_retaddr(void)
78866 +{
78867 + rtx insn;
78868 +
78869 + // 1. find function returns
78870 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
78871 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
78872 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
78873 + rtx body;
78874 +
78875 + // is it a retn
78876 + if (!JUMP_P(insn))
78877 + continue;
78878 + body = PATTERN(insn);
78879 + if (GET_CODE(body) == PARALLEL)
78880 + body = XVECEXP(body, 0, 0);
78881 + if (GET_CODE(body) != RETURN)
78882 + continue;
78883 + kernexec_instrument_retaddr(insn);
78884 + }
78885 +
78886 +// print_simple_rtl(stderr, get_insns());
78887 +// print_rtl(stderr, get_insns());
78888 +
78889 + return 0;
78890 +}
78891 +
78892 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78893 +{
78894 + const char * const plugin_name = plugin_info->base_name;
78895 + const int argc = plugin_info->argc;
78896 + const struct plugin_argument * const argv = plugin_info->argv;
78897 + int i;
78898 + struct register_pass_info kernexec_reload_pass_info = {
78899 + .pass = &kernexec_reload_pass.pass,
78900 + .reference_pass_name = "ssa",
78901 + .ref_pass_instance_number = 0,
78902 + .pos_op = PASS_POS_INSERT_AFTER
78903 + };
78904 + struct register_pass_info kernexec_fptr_pass_info = {
78905 + .pass = &kernexec_fptr_pass.pass,
78906 + .reference_pass_name = "ssa",
78907 + .ref_pass_instance_number = 0,
78908 + .pos_op = PASS_POS_INSERT_AFTER
78909 + };
78910 + struct register_pass_info kernexec_retaddr_pass_info = {
78911 + .pass = &kernexec_retaddr_pass.pass,
78912 + .reference_pass_name = "pro_and_epilogue",
78913 + .ref_pass_instance_number = 0,
78914 + .pos_op = PASS_POS_INSERT_AFTER
78915 + };
78916 +
78917 + if (!plugin_default_version_check(version, &gcc_version)) {
78918 + error(G_("incompatible gcc/plugin versions"));
78919 + return 1;
78920 + }
78921 +
78922 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
78923 +
78924 + if (TARGET_64BIT == 0)
78925 + return 0;
78926 +
78927 + for (i = 0; i < argc; ++i) {
78928 + if (!strcmp(argv[i].key, "method")) {
78929 + if (!argv[i].value) {
78930 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78931 + continue;
78932 + }
78933 + if (!strcmp(argv[i].value, "bts")) {
78934 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
78935 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
78936 + } else if (!strcmp(argv[i].value, "or")) {
78937 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
78938 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
78939 + fix_register("r10", 1, 1);
78940 + } else
78941 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78942 + continue;
78943 + }
78944 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78945 + }
78946 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
78947 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
78948 +
78949 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
78950 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
78951 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
78952 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
78953 +
78954 + return 0;
78955 +}
78956 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
78957 new file mode 100644
78958 index 0000000..4a9b187
78959 --- /dev/null
78960 +++ b/tools/gcc/stackleak_plugin.c
78961 @@ -0,0 +1,326 @@
78962 +/*
78963 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78964 + * Licensed under the GPL v2
78965 + *
78966 + * Note: the choice of the license means that the compilation process is
78967 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78968 + * but for the kernel it doesn't matter since it doesn't link against
78969 + * any of the gcc libraries
78970 + *
78971 + * gcc plugin to help implement various PaX features
78972 + *
78973 + * - track lowest stack pointer
78974 + *
78975 + * TODO:
78976 + * - initialize all local variables
78977 + *
78978 + * BUGS:
78979 + * - none known
78980 + */
78981 +#include "gcc-plugin.h"
78982 +#include "config.h"
78983 +#include "system.h"
78984 +#include "coretypes.h"
78985 +#include "tree.h"
78986 +#include "tree-pass.h"
78987 +#include "flags.h"
78988 +#include "intl.h"
78989 +#include "toplev.h"
78990 +#include "plugin.h"
78991 +//#include "expr.h" where are you...
78992 +#include "diagnostic.h"
78993 +#include "plugin-version.h"
78994 +#include "tm.h"
78995 +#include "function.h"
78996 +#include "basic-block.h"
78997 +#include "gimple.h"
78998 +#include "rtl.h"
78999 +#include "emit-rtl.h"
79000 +
79001 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79002 +
79003 +int plugin_is_GPL_compatible;
79004 +
79005 +static int track_frame_size = -1;
79006 +static const char track_function[] = "pax_track_stack";
79007 +static const char check_function[] = "pax_check_alloca";
79008 +static tree pax_check_alloca_decl;
79009 +static tree pax_track_stack_decl;
79010 +static bool init_locals;
79011 +
79012 +static struct plugin_info stackleak_plugin_info = {
79013 + .version = "201203021600",
79014 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
79015 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
79016 +};
79017 +
79018 +static bool gate_stackleak_track_stack(void);
79019 +static unsigned int execute_stackleak_tree_instrument(void);
79020 +static unsigned int execute_stackleak_final(void);
79021 +
79022 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
79023 + .pass = {
79024 + .type = GIMPLE_PASS,
79025 + .name = "stackleak_tree_instrument",
79026 + .gate = gate_stackleak_track_stack,
79027 + .execute = execute_stackleak_tree_instrument,
79028 + .sub = NULL,
79029 + .next = NULL,
79030 + .static_pass_number = 0,
79031 + .tv_id = TV_NONE,
79032 + .properties_required = PROP_gimple_leh | PROP_cfg,
79033 + .properties_provided = 0,
79034 + .properties_destroyed = 0,
79035 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
79036 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
79037 + }
79038 +};
79039 +
79040 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
79041 + .pass = {
79042 + .type = RTL_PASS,
79043 + .name = "stackleak_final",
79044 + .gate = gate_stackleak_track_stack,
79045 + .execute = execute_stackleak_final,
79046 + .sub = NULL,
79047 + .next = NULL,
79048 + .static_pass_number = 0,
79049 + .tv_id = TV_NONE,
79050 + .properties_required = 0,
79051 + .properties_provided = 0,
79052 + .properties_destroyed = 0,
79053 + .todo_flags_start = 0,
79054 + .todo_flags_finish = TODO_dump_func
79055 + }
79056 +};
79057 +
79058 +static bool gate_stackleak_track_stack(void)
79059 +{
79060 + return track_frame_size >= 0;
79061 +}
79062 +
79063 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
79064 +{
79065 + gimple check_alloca;
79066 + tree alloca_size;
79067 +
79068 + // insert call to void pax_check_alloca(unsigned long size)
79069 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
79070 + check_alloca = gimple_build_call(pax_check_alloca_decl, 1, alloca_size);
79071 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
79072 +}
79073 +
79074 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
79075 +{
79076 + gimple track_stack;
79077 +
79078 + // insert call to void pax_track_stack(void)
79079 + track_stack = gimple_build_call(pax_track_stack_decl, 0);
79080 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
79081 +}
79082 +
79083 +#if BUILDING_GCC_VERSION == 4005
79084 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
79085 +{
79086 + tree fndecl;
79087 +
79088 + if (!is_gimple_call(stmt))
79089 + return false;
79090 + fndecl = gimple_call_fndecl(stmt);
79091 + if (!fndecl)
79092 + return false;
79093 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
79094 + return false;
79095 +// print_node(stderr, "pax", fndecl, 4);
79096 + return DECL_FUNCTION_CODE(fndecl) == code;
79097 +}
79098 +#endif
79099 +
79100 +static bool is_alloca(gimple stmt)
79101 +{
79102 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
79103 + return true;
79104 +
79105 +#if BUILDING_GCC_VERSION >= 4007
79106 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
79107 + return true;
79108 +#endif
79109 +
79110 + return false;
79111 +}
79112 +
79113 +static unsigned int execute_stackleak_tree_instrument(void)
79114 +{
79115 + basic_block bb, entry_bb;
79116 + bool prologue_instrumented = false, is_leaf = true;
79117 +
79118 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
79119 +
79120 + // 1. loop through BBs and GIMPLE statements
79121 + FOR_EACH_BB(bb) {
79122 + gimple_stmt_iterator gsi;
79123 +
79124 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
79125 + gimple stmt;
79126 +
79127 + stmt = gsi_stmt(gsi);
79128 +
79129 + if (is_gimple_call(stmt))
79130 + is_leaf = false;
79131 +
79132 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
79133 + if (!is_alloca(stmt))
79134 + continue;
79135 +
79136 + // 2. insert stack overflow check before each __builtin_alloca call
79137 + stackleak_check_alloca(&gsi);
79138 +
79139 + // 3. insert track call after each __builtin_alloca call
79140 + stackleak_add_instrumentation(&gsi);
79141 + if (bb == entry_bb)
79142 + prologue_instrumented = true;
79143 + }
79144 + }
79145 +
79146 + // special case for some bad linux code: taking the address of static inline functions will materialize them
79147 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
79148 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
79149 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
79150 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
79151 + return 0;
79152 +
79153 + // 4. insert track call at the beginning
79154 + if (!prologue_instrumented) {
79155 + gimple_stmt_iterator gsi;
79156 +
79157 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
79158 + if (dom_info_available_p(CDI_DOMINATORS))
79159 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
79160 + gsi = gsi_start_bb(bb);
79161 + stackleak_add_instrumentation(&gsi);
79162 + }
79163 +
79164 + return 0;
79165 +}
79166 +
79167 +static unsigned int execute_stackleak_final(void)
79168 +{
79169 + rtx insn;
79170 +
79171 + if (cfun->calls_alloca)
79172 + return 0;
79173 +
79174 + // keep calls only if function frame is big enough
79175 + if (get_frame_size() >= track_frame_size)
79176 + return 0;
79177 +
79178 + // 1. find pax_track_stack calls
79179 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
79180 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
79181 + rtx body;
79182 +
79183 + if (!CALL_P(insn))
79184 + continue;
79185 + body = PATTERN(insn);
79186 + if (GET_CODE(body) != CALL)
79187 + continue;
79188 + body = XEXP(body, 0);
79189 + if (GET_CODE(body) != MEM)
79190 + continue;
79191 + body = XEXP(body, 0);
79192 + if (GET_CODE(body) != SYMBOL_REF)
79193 + continue;
79194 + if (strcmp(XSTR(body, 0), track_function))
79195 + continue;
79196 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
79197 + // 2. delete call
79198 + insn = delete_insn_and_edges(insn);
79199 +#if BUILDING_GCC_VERSION >= 4007
79200 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
79201 + insn = delete_insn_and_edges(insn);
79202 +#endif
79203 + }
79204 +
79205 +// print_simple_rtl(stderr, get_insns());
79206 +// print_rtl(stderr, get_insns());
79207 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
79208 +
79209 + return 0;
79210 +}
79211 +
79212 +static void stackleak_start_unit(void *gcc_data, void *user_dat)
79213 +{
79214 + tree fntype;
79215 +
79216 + // declare void pax_check_alloca(unsigned long size)
79217 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
79218 + pax_check_alloca_decl = build_fn_decl(check_function, fntype);
79219 + DECL_ASSEMBLER_NAME(pax_check_alloca_decl); // for LTO
79220 + TREE_PUBLIC(pax_check_alloca_decl) = 1;
79221 + DECL_EXTERNAL(pax_check_alloca_decl) = 1;
79222 + DECL_ARTIFICIAL(pax_check_alloca_decl) = 1;
79223 +
79224 + // declare void pax_track_stack(void)
79225 + fntype = build_function_type_list(void_type_node, NULL_TREE);
79226 + pax_track_stack_decl = build_fn_decl(track_function, fntype);
79227 + DECL_ASSEMBLER_NAME(pax_track_stack_decl); // for LTO
79228 + TREE_PUBLIC(pax_track_stack_decl) = 1;
79229 + DECL_EXTERNAL(pax_track_stack_decl) = 1;
79230 + DECL_ARTIFICIAL(pax_track_stack_decl) = 1;
79231 +}
79232 +
79233 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79234 +{
79235 + const char * const plugin_name = plugin_info->base_name;
79236 + const int argc = plugin_info->argc;
79237 + const struct plugin_argument * const argv = plugin_info->argv;
79238 + int i;
79239 + struct register_pass_info stackleak_tree_instrument_pass_info = {
79240 + .pass = &stackleak_tree_instrument_pass.pass,
79241 +// .reference_pass_name = "tree_profile",
79242 + .reference_pass_name = "optimized",
79243 + .ref_pass_instance_number = 0,
79244 + .pos_op = PASS_POS_INSERT_BEFORE
79245 + };
79246 + struct register_pass_info stackleak_final_pass_info = {
79247 + .pass = &stackleak_final_rtl_opt_pass.pass,
79248 + .reference_pass_name = "final",
79249 + .ref_pass_instance_number = 0,
79250 + .pos_op = PASS_POS_INSERT_BEFORE
79251 + };
79252 +
79253 + if (!plugin_default_version_check(version, &gcc_version)) {
79254 + error(G_("incompatible gcc/plugin versions"));
79255 + return 1;
79256 + }
79257 +
79258 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
79259 +
79260 + for (i = 0; i < argc; ++i) {
79261 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
79262 + if (!argv[i].value) {
79263 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79264 + continue;
79265 + }
79266 + track_frame_size = atoi(argv[i].value);
79267 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
79268 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
79269 + continue;
79270 + }
79271 + if (!strcmp(argv[i].key, "initialize-locals")) {
79272 + if (argv[i].value) {
79273 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
79274 + continue;
79275 + }
79276 + init_locals = true;
79277 + continue;
79278 + }
79279 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79280 + }
79281 +
79282 + register_callback("start_unit", PLUGIN_START_UNIT, &stackleak_start_unit, NULL);
79283 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
79284 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
79285 +
79286 + return 0;
79287 +}
79288 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
79289 index 6789d78..4afd019 100644
79290 --- a/tools/perf/util/include/asm/alternative-asm.h
79291 +++ b/tools/perf/util/include/asm/alternative-asm.h
79292 @@ -5,4 +5,7 @@
79293
79294 #define altinstruction_entry #
79295
79296 + .macro pax_force_retaddr rip=0, reload=0
79297 + .endm
79298 +
79299 #endif
79300 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
79301 index af0f22f..9a7d479 100644
79302 --- a/usr/gen_init_cpio.c
79303 +++ b/usr/gen_init_cpio.c
79304 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
79305 int retval;
79306 int rc = -1;
79307 int namesize;
79308 - int i;
79309 + unsigned int i;
79310
79311 mode |= S_IFREG;
79312
79313 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
79314 *env_var = *expanded = '\0';
79315 strncat(env_var, start + 2, end - start - 2);
79316 strncat(expanded, new_location, start - new_location);
79317 - strncat(expanded, getenv(env_var), PATH_MAX);
79318 - strncat(expanded, end + 1, PATH_MAX);
79319 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
79320 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
79321 strncpy(new_location, expanded, PATH_MAX);
79322 + new_location[PATH_MAX] = 0;
79323 } else
79324 break;
79325 }
79326 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
79327 index d9cfb78..4f27c10 100644
79328 --- a/virt/kvm/kvm_main.c
79329 +++ b/virt/kvm/kvm_main.c
79330 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
79331
79332 static cpumask_var_t cpus_hardware_enabled;
79333 static int kvm_usage_count = 0;
79334 -static atomic_t hardware_enable_failed;
79335 +static atomic_unchecked_t hardware_enable_failed;
79336
79337 struct kmem_cache *kvm_vcpu_cache;
79338 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
79339 @@ -2268,7 +2268,7 @@ static void hardware_enable_nolock(void *junk)
79340
79341 if (r) {
79342 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
79343 - atomic_inc(&hardware_enable_failed);
79344 + atomic_inc_unchecked(&hardware_enable_failed);
79345 printk(KERN_INFO "kvm: enabling virtualization on "
79346 "CPU%d failed\n", cpu);
79347 }
79348 @@ -2322,10 +2322,10 @@ static int hardware_enable_all(void)
79349
79350 kvm_usage_count++;
79351 if (kvm_usage_count == 1) {
79352 - atomic_set(&hardware_enable_failed, 0);
79353 + atomic_set_unchecked(&hardware_enable_failed, 0);
79354 on_each_cpu(hardware_enable_nolock, NULL, 1);
79355
79356 - if (atomic_read(&hardware_enable_failed)) {
79357 + if (atomic_read_unchecked(&hardware_enable_failed)) {
79358 hardware_disable_all_nolock();
79359 r = -EBUSY;
79360 }
79361 @@ -2676,7 +2676,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
79362 kvm_arch_vcpu_put(vcpu);
79363 }
79364
79365 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79366 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79367 struct module *module)
79368 {
79369 int r;
79370 @@ -2739,7 +2739,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79371 if (!vcpu_align)
79372 vcpu_align = __alignof__(struct kvm_vcpu);
79373 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
79374 - 0, NULL);
79375 + SLAB_USERCOPY, NULL);
79376 if (!kvm_vcpu_cache) {
79377 r = -ENOMEM;
79378 goto out_free_3;
79379 @@ -2749,9 +2749,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79380 if (r)
79381 goto out_free;
79382
79383 - kvm_chardev_ops.owner = module;
79384 - kvm_vm_fops.owner = module;
79385 - kvm_vcpu_fops.owner = module;
79386 + pax_open_kernel();
79387 + *(void **)&kvm_chardev_ops.owner = module;
79388 + *(void **)&kvm_vm_fops.owner = module;
79389 + *(void **)&kvm_vcpu_fops.owner = module;
79390 + pax_close_kernel();
79391
79392 r = misc_register(&kvm_dev);
79393 if (r) {