]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-3.2.7-201202261954.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-3.2.7-201202261954.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index dfa6fc6..0095943 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9 +*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13 @@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17 +*.gmo
18 *.grep
19 *.grp
20 *.gz
21 @@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25 +*.vim
26 *.xml
27 *.xz
28 *_MODULES
29 +*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33 @@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37 +PERF*
38 SCCS
39 System.map*
40 TAGS
41 @@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45 +builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51 +clut_vga16.c
52 +common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59 +config.c
60 config.mak
61 config.mak.autogen
62 +config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66 @@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70 +exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74 @@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78 +gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90 @@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103 -linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107 @@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111 -media
112 mconf
113 +mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120 +mkpiggy
121 mkprep
122 mkregtable
123 mktables
124 @@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128 +regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132 @@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152 +vmlinux.bin.bz2
153 vmlinux.lds
154 +vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158 @@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zconf.lex.c
169 zoffset.h
170 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171 index 81c287f..d456d02 100644
172 --- a/Documentation/kernel-parameters.txt
173 +++ b/Documentation/kernel-parameters.txt
174 @@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179 + virtualization environments that don't cope well with the
180 + expand down segment used by UDEREF on X86-32 or the frequent
181 + page table updates on X86-64.
182 +
183 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184 +
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188 diff --git a/Makefile b/Makefile
189 index d1bdc90..c985d2a 100644
190 --- a/Makefile
191 +++ b/Makefile
192 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197 -HOSTCXXFLAGS = -O2
198 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208 -PHONY += scripts_basic
209 -scripts_basic:
210 +PHONY += scripts_basic gcc-plugins
211 +scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215 @@ -564,6 +565,48 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219 +ifndef DISABLE_PAX_PLUGINS
220 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223 +endif
224 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
225 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227 +endif
228 +ifdef CONFIG_KALLOCSTAT_PLUGIN
229 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230 +endif
231 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
234 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
235 +endif
236 +ifdef CONFIG_CHECKER_PLUGIN
237 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
238 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
239 +endif
240 +endif
241 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS) $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS)
242 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
243 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
244 +ifeq ($(KBUILD_EXTMOD),)
245 +gcc-plugins:
246 + $(Q)$(MAKE) $(build)=tools/gcc
247 +else
248 +gcc-plugins: ;
249 +endif
250 +else
251 +gcc-plugins:
252 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
253 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
254 +else
255 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
256 +endif
257 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
258 +endif
259 +endif
260 +
261 include $(srctree)/arch/$(SRCARCH)/Makefile
262
263 ifneq ($(CONFIG_FRAME_WARN),0)
264 @@ -708,7 +751,7 @@ export mod_strip_cmd
265
266
267 ifeq ($(KBUILD_EXTMOD),)
268 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
269 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
270
271 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
272 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
273 @@ -932,6 +975,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
274
275 # The actual objects are generated when descending,
276 # make sure no implicit rule kicks in
277 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
278 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
279 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
280
281 # Handle descending into subdirectories listed in $(vmlinux-dirs)
282 @@ -941,7 +986,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
283 # Error messages still appears in the original language
284
285 PHONY += $(vmlinux-dirs)
286 -$(vmlinux-dirs): prepare scripts
287 +$(vmlinux-dirs): gcc-plugins prepare scripts
288 $(Q)$(MAKE) $(build)=$@
289
290 # Store (new) KERNELRELASE string in include/config/kernel.release
291 @@ -985,6 +1030,7 @@ prepare0: archprepare FORCE
292 $(Q)$(MAKE) $(build)=.
293
294 # All the preparing..
295 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
296 prepare: prepare0
297
298 # Generate some files
299 @@ -1086,6 +1132,8 @@ all: modules
300 # using awk while concatenating to the final file.
301
302 PHONY += modules
303 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
304 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
305 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
306 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
307 @$(kecho) ' Building modules, stage 2.';
308 @@ -1101,7 +1149,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
309
310 # Target to prepare building external modules
311 PHONY += modules_prepare
312 -modules_prepare: prepare scripts
313 +modules_prepare: gcc-plugins prepare scripts
314
315 # Target to install modules
316 PHONY += modules_install
317 @@ -1198,6 +1246,7 @@ distclean: mrproper
318 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
319 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
320 -o -name '.*.rej' \
321 + -o -name '.*.rej' -o -name '*.so' \
322 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
323 -type f -print | xargs rm -f
324
325 @@ -1358,6 +1407,8 @@ PHONY += $(module-dirs) modules
326 $(module-dirs): crmodverdir $(objtree)/Module.symvers
327 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
328
329 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
330 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
331 modules: $(module-dirs)
332 @$(kecho) ' Building modules, stage 2.';
333 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
334 @@ -1484,17 +1535,21 @@ else
335 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
336 endif
337
338 -%.s: %.c prepare scripts FORCE
339 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
340 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
341 +%.s: %.c gcc-plugins prepare scripts FORCE
342 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
343 %.i: %.c prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345 -%.o: %.c prepare scripts FORCE
346 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
347 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
348 +%.o: %.c gcc-plugins prepare scripts FORCE
349 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
350 %.lst: %.c prepare scripts FORCE
351 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
352 -%.s: %.S prepare scripts FORCE
353 +%.s: %.S gcc-plugins prepare scripts FORCE
354 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
355 -%.o: %.S prepare scripts FORCE
356 +%.o: %.S gcc-plugins prepare scripts FORCE
357 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
358 %.symtypes: %.c prepare scripts FORCE
359 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
360 @@ -1504,11 +1559,15 @@ endif
361 $(cmd_crmodverdir)
362 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
363 $(build)=$(build-dir)
364 -%/: prepare scripts FORCE
365 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
366 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
367 +%/: gcc-plugins prepare scripts FORCE
368 $(cmd_crmodverdir)
369 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
370 $(build)=$(build-dir)
371 -%.ko: prepare scripts FORCE
372 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
373 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
374 +%.ko: gcc-plugins prepare scripts FORCE
375 $(cmd_crmodverdir)
376 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
377 $(build)=$(build-dir) $(@:.ko=.o)
378 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
379 index 640f909..48b6597 100644
380 --- a/arch/alpha/include/asm/atomic.h
381 +++ b/arch/alpha/include/asm/atomic.h
382 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
383 #define atomic_dec(v) atomic_sub(1,(v))
384 #define atomic64_dec(v) atomic64_sub(1,(v))
385
386 +#define atomic64_read_unchecked(v) atomic64_read(v)
387 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
388 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
389 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
390 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
391 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
392 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
393 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
394 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
395 +
396 #define smp_mb__before_atomic_dec() smp_mb()
397 #define smp_mb__after_atomic_dec() smp_mb()
398 #define smp_mb__before_atomic_inc() smp_mb()
399 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
400 index da5449e..7418343 100644
401 --- a/arch/alpha/include/asm/elf.h
402 +++ b/arch/alpha/include/asm/elf.h
403 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
404
405 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
406
407 +#ifdef CONFIG_PAX_ASLR
408 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
409 +
410 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
411 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
412 +#endif
413 +
414 /* $0 is set by ld.so to a pointer to a function which might be
415 registered using atexit. This provides a mean for the dynamic
416 linker to call DT_FINI functions for shared libraries that have
417 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
418 index de98a73..bd4f1f8 100644
419 --- a/arch/alpha/include/asm/pgtable.h
420 +++ b/arch/alpha/include/asm/pgtable.h
421 @@ -101,6 +101,17 @@ struct vm_area_struct;
422 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
423 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
424 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
425 +
426 +#ifdef CONFIG_PAX_PAGEEXEC
427 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
428 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
429 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
430 +#else
431 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
432 +# define PAGE_COPY_NOEXEC PAGE_COPY
433 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
434 +#endif
435 +
436 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
437
438 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
439 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
440 index 2fd00b7..cfd5069 100644
441 --- a/arch/alpha/kernel/module.c
442 +++ b/arch/alpha/kernel/module.c
443 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
444
445 /* The small sections were sorted to the end of the segment.
446 The following should definitely cover them. */
447 - gp = (u64)me->module_core + me->core_size - 0x8000;
448 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
449 got = sechdrs[me->arch.gotsecindex].sh_addr;
450
451 for (i = 0; i < n; i++) {
452 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
453 index 01e8715..be0e80f 100644
454 --- a/arch/alpha/kernel/osf_sys.c
455 +++ b/arch/alpha/kernel/osf_sys.c
456 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
457 /* At this point: (!vma || addr < vma->vm_end). */
458 if (limit - len < addr)
459 return -ENOMEM;
460 - if (!vma || addr + len <= vma->vm_start)
461 + if (check_heap_stack_gap(vma, addr, len))
462 return addr;
463 addr = vma->vm_end;
464 vma = vma->vm_next;
465 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
466 merely specific addresses, but regions of memory -- perhaps
467 this feature should be incorporated into all ports? */
468
469 +#ifdef CONFIG_PAX_RANDMMAP
470 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
471 +#endif
472 +
473 if (addr) {
474 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
475 if (addr != (unsigned long) -ENOMEM)
476 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
477 }
478
479 /* Next, try allocating at TASK_UNMAPPED_BASE. */
480 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
481 - len, limit);
482 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
483 +
484 if (addr != (unsigned long) -ENOMEM)
485 return addr;
486
487 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
488 index fadd5f8..904e73a 100644
489 --- a/arch/alpha/mm/fault.c
490 +++ b/arch/alpha/mm/fault.c
491 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
492 __reload_thread(pcb);
493 }
494
495 +#ifdef CONFIG_PAX_PAGEEXEC
496 +/*
497 + * PaX: decide what to do with offenders (regs->pc = fault address)
498 + *
499 + * returns 1 when task should be killed
500 + * 2 when patched PLT trampoline was detected
501 + * 3 when unpatched PLT trampoline was detected
502 + */
503 +static int pax_handle_fetch_fault(struct pt_regs *regs)
504 +{
505 +
506 +#ifdef CONFIG_PAX_EMUPLT
507 + int err;
508 +
509 + do { /* PaX: patched PLT emulation #1 */
510 + unsigned int ldah, ldq, jmp;
511 +
512 + err = get_user(ldah, (unsigned int *)regs->pc);
513 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
514 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
515 +
516 + if (err)
517 + break;
518 +
519 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
520 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
521 + jmp == 0x6BFB0000U)
522 + {
523 + unsigned long r27, addr;
524 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
525 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
526 +
527 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
528 + err = get_user(r27, (unsigned long *)addr);
529 + if (err)
530 + break;
531 +
532 + regs->r27 = r27;
533 + regs->pc = r27;
534 + return 2;
535 + }
536 + } while (0);
537 +
538 + do { /* PaX: patched PLT emulation #2 */
539 + unsigned int ldah, lda, br;
540 +
541 + err = get_user(ldah, (unsigned int *)regs->pc);
542 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
543 + err |= get_user(br, (unsigned int *)(regs->pc+8));
544 +
545 + if (err)
546 + break;
547 +
548 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
549 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
550 + (br & 0xFFE00000U) == 0xC3E00000U)
551 + {
552 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
553 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
554 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
555 +
556 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
557 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
558 + return 2;
559 + }
560 + } while (0);
561 +
562 + do { /* PaX: unpatched PLT emulation */
563 + unsigned int br;
564 +
565 + err = get_user(br, (unsigned int *)regs->pc);
566 +
567 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
568 + unsigned int br2, ldq, nop, jmp;
569 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
570 +
571 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
572 + err = get_user(br2, (unsigned int *)addr);
573 + err |= get_user(ldq, (unsigned int *)(addr+4));
574 + err |= get_user(nop, (unsigned int *)(addr+8));
575 + err |= get_user(jmp, (unsigned int *)(addr+12));
576 + err |= get_user(resolver, (unsigned long *)(addr+16));
577 +
578 + if (err)
579 + break;
580 +
581 + if (br2 == 0xC3600000U &&
582 + ldq == 0xA77B000CU &&
583 + nop == 0x47FF041FU &&
584 + jmp == 0x6B7B0000U)
585 + {
586 + regs->r28 = regs->pc+4;
587 + regs->r27 = addr+16;
588 + regs->pc = resolver;
589 + return 3;
590 + }
591 + }
592 + } while (0);
593 +#endif
594 +
595 + return 1;
596 +}
597 +
598 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
599 +{
600 + unsigned long i;
601 +
602 + printk(KERN_ERR "PAX: bytes at PC: ");
603 + for (i = 0; i < 5; i++) {
604 + unsigned int c;
605 + if (get_user(c, (unsigned int *)pc+i))
606 + printk(KERN_CONT "???????? ");
607 + else
608 + printk(KERN_CONT "%08x ", c);
609 + }
610 + printk("\n");
611 +}
612 +#endif
613
614 /*
615 * This routine handles page faults. It determines the address,
616 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
617 good_area:
618 si_code = SEGV_ACCERR;
619 if (cause < 0) {
620 - if (!(vma->vm_flags & VM_EXEC))
621 + if (!(vma->vm_flags & VM_EXEC)) {
622 +
623 +#ifdef CONFIG_PAX_PAGEEXEC
624 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
625 + goto bad_area;
626 +
627 + up_read(&mm->mmap_sem);
628 + switch (pax_handle_fetch_fault(regs)) {
629 +
630 +#ifdef CONFIG_PAX_EMUPLT
631 + case 2:
632 + case 3:
633 + return;
634 +#endif
635 +
636 + }
637 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
638 + do_group_exit(SIGKILL);
639 +#else
640 goto bad_area;
641 +#endif
642 +
643 + }
644 } else if (!cause) {
645 /* Allow reads even for write-only mappings */
646 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
647 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
648 index 86976d0..6610950 100644
649 --- a/arch/arm/include/asm/atomic.h
650 +++ b/arch/arm/include/asm/atomic.h
651 @@ -15,6 +15,10 @@
652 #include <linux/types.h>
653 #include <asm/system.h>
654
655 +#ifdef CONFIG_GENERIC_ATOMIC64
656 +#include <asm-generic/atomic64.h>
657 +#endif
658 +
659 #define ATOMIC_INIT(i) { (i) }
660
661 #ifdef __KERNEL__
662 @@ -239,6 +243,14 @@ typedef struct {
663 u64 __aligned(8) counter;
664 } atomic64_t;
665
666 +#ifdef CONFIG_PAX_REFCOUNT
667 +typedef struct {
668 + u64 __aligned(8) counter;
669 +} atomic64_unchecked_t;
670 +#else
671 +typedef atomic64_t atomic64_unchecked_t;
672 +#endif
673 +
674 #define ATOMIC64_INIT(i) { (i) }
675
676 static inline u64 atomic64_read(atomic64_t *v)
677 @@ -459,6 +471,16 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
678 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
679 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
680
681 +#define atomic64_read_unchecked(v) atomic64_read(v)
682 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
683 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
684 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
685 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
686 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
687 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
688 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
689 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
690 +
691 #endif /* !CONFIG_GENERIC_ATOMIC64 */
692 #endif
693 #endif
694 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
695 index 0e9ce8d..6ef1e03 100644
696 --- a/arch/arm/include/asm/elf.h
697 +++ b/arch/arm/include/asm/elf.h
698 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
699 the loader. We need to make sure that it is out of the way of the program
700 that it will "exec", and that there is sufficient room for the brk. */
701
702 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
703 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
704 +
705 +#ifdef CONFIG_PAX_ASLR
706 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
707 +
708 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
709 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
710 +#endif
711
712 /* When the program starts, a1 contains a pointer to a function to be
713 registered with atexit, as per the SVR4 ABI. A value of 0 means we
714 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
715 extern void elf_set_personality(const struct elf32_hdr *);
716 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
717
718 -struct mm_struct;
719 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
720 -#define arch_randomize_brk arch_randomize_brk
721 -
722 extern int vectors_user_mapping(void);
723 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
724 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
725 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
726 index e51b1e8..32a3113 100644
727 --- a/arch/arm/include/asm/kmap_types.h
728 +++ b/arch/arm/include/asm/kmap_types.h
729 @@ -21,6 +21,7 @@ enum km_type {
730 KM_L1_CACHE,
731 KM_L2_CACHE,
732 KM_KDB,
733 + KM_CLEARPAGE,
734 KM_TYPE_NR
735 };
736
737 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
738 index b293616..96310e5 100644
739 --- a/arch/arm/include/asm/uaccess.h
740 +++ b/arch/arm/include/asm/uaccess.h
741 @@ -22,6 +22,8 @@
742 #define VERIFY_READ 0
743 #define VERIFY_WRITE 1
744
745 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
746 +
747 /*
748 * The exception table consists of pairs of addresses: the first is the
749 * address of an instruction that is allowed to fault, and the second is
750 @@ -387,8 +389,23 @@ do { \
751
752
753 #ifdef CONFIG_MMU
754 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
755 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
756 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
757 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
758 +
759 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
760 +{
761 + if (!__builtin_constant_p(n))
762 + check_object_size(to, n, false);
763 + return ___copy_from_user(to, from, n);
764 +}
765 +
766 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
767 +{
768 + if (!__builtin_constant_p(n))
769 + check_object_size(from, n, true);
770 + return ___copy_to_user(to, from, n);
771 +}
772 +
773 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
774 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
775 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
776 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
777
778 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
779 {
780 + if ((long)n < 0)
781 + return n;
782 +
783 if (access_ok(VERIFY_READ, from, n))
784 n = __copy_from_user(to, from, n);
785 else /* security hole - plug it */
786 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
787
788 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
789 {
790 + if ((long)n < 0)
791 + return n;
792 +
793 if (access_ok(VERIFY_WRITE, to, n))
794 n = __copy_to_user(to, from, n);
795 return n;
796 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
797 index 5b0bce6..becd81c 100644
798 --- a/arch/arm/kernel/armksyms.c
799 +++ b/arch/arm/kernel/armksyms.c
800 @@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
801 #ifdef CONFIG_MMU
802 EXPORT_SYMBOL(copy_page);
803
804 -EXPORT_SYMBOL(__copy_from_user);
805 -EXPORT_SYMBOL(__copy_to_user);
806 +EXPORT_SYMBOL(___copy_from_user);
807 +EXPORT_SYMBOL(___copy_to_user);
808 EXPORT_SYMBOL(__clear_user);
809
810 EXPORT_SYMBOL(__get_user_1);
811 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
812 index 3d0c6fb..3dcae52 100644
813 --- a/arch/arm/kernel/process.c
814 +++ b/arch/arm/kernel/process.c
815 @@ -28,7 +28,6 @@
816 #include <linux/tick.h>
817 #include <linux/utsname.h>
818 #include <linux/uaccess.h>
819 -#include <linux/random.h>
820 #include <linux/hw_breakpoint.h>
821 #include <linux/cpuidle.h>
822
823 @@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
824 return 0;
825 }
826
827 -unsigned long arch_randomize_brk(struct mm_struct *mm)
828 -{
829 - unsigned long range_end = mm->brk + 0x02000000;
830 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
831 -}
832 -
833 #ifdef CONFIG_MMU
834 /*
835 * The vectors page is always readable from user space for the
836 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
837 index 99a5727..a3d5bb1 100644
838 --- a/arch/arm/kernel/traps.c
839 +++ b/arch/arm/kernel/traps.c
840 @@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
841
842 static DEFINE_RAW_SPINLOCK(die_lock);
843
844 +extern void gr_handle_kernel_exploit(void);
845 +
846 /*
847 * This function is protected against re-entrancy.
848 */
849 @@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
850 panic("Fatal exception in interrupt");
851 if (panic_on_oops)
852 panic("Fatal exception");
853 +
854 + gr_handle_kernel_exploit();
855 +
856 if (ret != NOTIFY_STOP)
857 do_exit(SIGSEGV);
858 }
859 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
860 index 66a477a..bee61d3 100644
861 --- a/arch/arm/lib/copy_from_user.S
862 +++ b/arch/arm/lib/copy_from_user.S
863 @@ -16,7 +16,7 @@
864 /*
865 * Prototype:
866 *
867 - * size_t __copy_from_user(void *to, const void *from, size_t n)
868 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
869 *
870 * Purpose:
871 *
872 @@ -84,11 +84,11 @@
873
874 .text
875
876 -ENTRY(__copy_from_user)
877 +ENTRY(___copy_from_user)
878
879 #include "copy_template.S"
880
881 -ENDPROC(__copy_from_user)
882 +ENDPROC(___copy_from_user)
883
884 .pushsection .fixup,"ax"
885 .align 0
886 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
887 index d066df6..df28194 100644
888 --- a/arch/arm/lib/copy_to_user.S
889 +++ b/arch/arm/lib/copy_to_user.S
890 @@ -16,7 +16,7 @@
891 /*
892 * Prototype:
893 *
894 - * size_t __copy_to_user(void *to, const void *from, size_t n)
895 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
896 *
897 * Purpose:
898 *
899 @@ -88,11 +88,11 @@
900 .text
901
902 ENTRY(__copy_to_user_std)
903 -WEAK(__copy_to_user)
904 +WEAK(___copy_to_user)
905
906 #include "copy_template.S"
907
908 -ENDPROC(__copy_to_user)
909 +ENDPROC(___copy_to_user)
910 ENDPROC(__copy_to_user_std)
911
912 .pushsection .fixup,"ax"
913 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
914 index d0ece2a..5ae2f39 100644
915 --- a/arch/arm/lib/uaccess.S
916 +++ b/arch/arm/lib/uaccess.S
917 @@ -20,7 +20,7 @@
918
919 #define PAGE_SHIFT 12
920
921 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
922 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
923 * Purpose : copy a block to user memory from kernel memory
924 * Params : to - user memory
925 * : from - kernel memory
926 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
927 sub r2, r2, ip
928 b .Lc2u_dest_aligned
929
930 -ENTRY(__copy_to_user)
931 +ENTRY(___copy_to_user)
932 stmfd sp!, {r2, r4 - r7, lr}
933 cmp r2, #4
934 blt .Lc2u_not_enough
935 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
936 ldrgtb r3, [r1], #0
937 USER( T(strgtb) r3, [r0], #1) @ May fault
938 b .Lc2u_finished
939 -ENDPROC(__copy_to_user)
940 +ENDPROC(___copy_to_user)
941
942 .pushsection .fixup,"ax"
943 .align 0
944 9001: ldmfd sp!, {r0, r4 - r7, pc}
945 .popsection
946
947 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
948 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
949 * Purpose : copy a block from user memory to kernel memory
950 * Params : to - kernel memory
951 * : from - user memory
952 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
953 sub r2, r2, ip
954 b .Lcfu_dest_aligned
955
956 -ENTRY(__copy_from_user)
957 +ENTRY(___copy_from_user)
958 stmfd sp!, {r0, r2, r4 - r7, lr}
959 cmp r2, #4
960 blt .Lcfu_not_enough
961 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
962 USER( T(ldrgtb) r3, [r1], #1) @ May fault
963 strgtb r3, [r0], #1
964 b .Lcfu_finished
965 -ENDPROC(__copy_from_user)
966 +ENDPROC(___copy_from_user)
967
968 .pushsection .fixup,"ax"
969 .align 0
970 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
971 index 025f742..8432b08 100644
972 --- a/arch/arm/lib/uaccess_with_memcpy.c
973 +++ b/arch/arm/lib/uaccess_with_memcpy.c
974 @@ -104,7 +104,7 @@ out:
975 }
976
977 unsigned long
978 -__copy_to_user(void __user *to, const void *from, unsigned long n)
979 +___copy_to_user(void __user *to, const void *from, unsigned long n)
980 {
981 /*
982 * This test is stubbed out of the main function above to keep
983 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
984 index 2b2d51c..0127490 100644
985 --- a/arch/arm/mach-ux500/mbox-db5500.c
986 +++ b/arch/arm/mach-ux500/mbox-db5500.c
987 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
988 return sprintf(buf, "0x%X\n", mbox_value);
989 }
990
991 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
992 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
993
994 static int mbox_show(struct seq_file *s, void *data)
995 {
996 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
997 index aa33949..b242a2f 100644
998 --- a/arch/arm/mm/fault.c
999 +++ b/arch/arm/mm/fault.c
1000 @@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1001 }
1002 #endif
1003
1004 +#ifdef CONFIG_PAX_PAGEEXEC
1005 + if (fsr & FSR_LNX_PF) {
1006 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1007 + do_group_exit(SIGKILL);
1008 + }
1009 +#endif
1010 +
1011 tsk->thread.address = addr;
1012 tsk->thread.error_code = fsr;
1013 tsk->thread.trap_no = 14;
1014 @@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1015 }
1016 #endif /* CONFIG_MMU */
1017
1018 +#ifdef CONFIG_PAX_PAGEEXEC
1019 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1020 +{
1021 + long i;
1022 +
1023 + printk(KERN_ERR "PAX: bytes at PC: ");
1024 + for (i = 0; i < 20; i++) {
1025 + unsigned char c;
1026 + if (get_user(c, (__force unsigned char __user *)pc+i))
1027 + printk(KERN_CONT "?? ");
1028 + else
1029 + printk(KERN_CONT "%02x ", c);
1030 + }
1031 + printk("\n");
1032 +
1033 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1034 + for (i = -1; i < 20; i++) {
1035 + unsigned long c;
1036 + if (get_user(c, (__force unsigned long __user *)sp+i))
1037 + printk(KERN_CONT "???????? ");
1038 + else
1039 + printk(KERN_CONT "%08lx ", c);
1040 + }
1041 + printk("\n");
1042 +}
1043 +#endif
1044 +
1045 /*
1046 * First Level Translation Fault Handler
1047 *
1048 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1049 index 44b628e..623ee2a 100644
1050 --- a/arch/arm/mm/mmap.c
1051 +++ b/arch/arm/mm/mmap.c
1052 @@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1053 if (len > TASK_SIZE)
1054 return -ENOMEM;
1055
1056 +#ifdef CONFIG_PAX_RANDMMAP
1057 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1058 +#endif
1059 +
1060 if (addr) {
1061 if (do_align)
1062 addr = COLOUR_ALIGN(addr, pgoff);
1063 @@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1064 addr = PAGE_ALIGN(addr);
1065
1066 vma = find_vma(mm, addr);
1067 - if (TASK_SIZE - len >= addr &&
1068 - (!vma || addr + len <= vma->vm_start))
1069 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1070 return addr;
1071 }
1072 if (len > mm->cached_hole_size) {
1073 - start_addr = addr = mm->free_area_cache;
1074 + start_addr = addr = mm->free_area_cache;
1075 } else {
1076 - start_addr = addr = TASK_UNMAPPED_BASE;
1077 - mm->cached_hole_size = 0;
1078 + start_addr = addr = mm->mmap_base;
1079 + mm->cached_hole_size = 0;
1080 }
1081 /* 8 bits of randomness in 20 address space bits */
1082 if ((current->flags & PF_RANDOMIZE) &&
1083 @@ -89,14 +92,14 @@ full_search:
1084 * Start a new search - just in case we missed
1085 * some holes.
1086 */
1087 - if (start_addr != TASK_UNMAPPED_BASE) {
1088 - start_addr = addr = TASK_UNMAPPED_BASE;
1089 + if (start_addr != mm->mmap_base) {
1090 + start_addr = addr = mm->mmap_base;
1091 mm->cached_hole_size = 0;
1092 goto full_search;
1093 }
1094 return -ENOMEM;
1095 }
1096 - if (!vma || addr + len <= vma->vm_start) {
1097 + if (check_heap_stack_gap(vma, addr, len)) {
1098 /*
1099 * Remember the place where we stopped the search:
1100 */
1101 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1102 index 3b3159b..425ea94 100644
1103 --- a/arch/avr32/include/asm/elf.h
1104 +++ b/arch/avr32/include/asm/elf.h
1105 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1106 the loader. We need to make sure that it is out of the way of the program
1107 that it will "exec", and that there is sufficient room for the brk. */
1108
1109 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1110 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1111
1112 +#ifdef CONFIG_PAX_ASLR
1113 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1114 +
1115 +#define PAX_DELTA_MMAP_LEN 15
1116 +#define PAX_DELTA_STACK_LEN 15
1117 +#endif
1118
1119 /* This yields a mask that user programs can use to figure out what
1120 instruction set this CPU supports. This could be done in user space,
1121 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1122 index b7f5c68..556135c 100644
1123 --- a/arch/avr32/include/asm/kmap_types.h
1124 +++ b/arch/avr32/include/asm/kmap_types.h
1125 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1126 D(11) KM_IRQ1,
1127 D(12) KM_SOFTIRQ0,
1128 D(13) KM_SOFTIRQ1,
1129 -D(14) KM_TYPE_NR
1130 +D(14) KM_CLEARPAGE,
1131 +D(15) KM_TYPE_NR
1132 };
1133
1134 #undef D
1135 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1136 index f7040a1..db9f300 100644
1137 --- a/arch/avr32/mm/fault.c
1138 +++ b/arch/avr32/mm/fault.c
1139 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1140
1141 int exception_trace = 1;
1142
1143 +#ifdef CONFIG_PAX_PAGEEXEC
1144 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1145 +{
1146 + unsigned long i;
1147 +
1148 + printk(KERN_ERR "PAX: bytes at PC: ");
1149 + for (i = 0; i < 20; i++) {
1150 + unsigned char c;
1151 + if (get_user(c, (unsigned char *)pc+i))
1152 + printk(KERN_CONT "???????? ");
1153 + else
1154 + printk(KERN_CONT "%02x ", c);
1155 + }
1156 + printk("\n");
1157 +}
1158 +#endif
1159 +
1160 /*
1161 * This routine handles page faults. It determines the address and the
1162 * problem, and then passes it off to one of the appropriate routines.
1163 @@ -156,6 +173,16 @@ bad_area:
1164 up_read(&mm->mmap_sem);
1165
1166 if (user_mode(regs)) {
1167 +
1168 +#ifdef CONFIG_PAX_PAGEEXEC
1169 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1170 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1171 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1172 + do_group_exit(SIGKILL);
1173 + }
1174 + }
1175 +#endif
1176 +
1177 if (exception_trace && printk_ratelimit())
1178 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1179 "sp %08lx ecr %lu\n",
1180 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1181 index 0d8a7d6..d0c9ff5 100644
1182 --- a/arch/frv/include/asm/atomic.h
1183 +++ b/arch/frv/include/asm/atomic.h
1184 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1185 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1186 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1187
1188 +#define atomic64_read_unchecked(v) atomic64_read(v)
1189 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1190 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1191 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1192 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1193 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1194 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1195 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1196 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1197 +
1198 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
1199 {
1200 int c, old;
1201 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1202 index f8e16b2..c73ff79 100644
1203 --- a/arch/frv/include/asm/kmap_types.h
1204 +++ b/arch/frv/include/asm/kmap_types.h
1205 @@ -23,6 +23,7 @@ enum km_type {
1206 KM_IRQ1,
1207 KM_SOFTIRQ0,
1208 KM_SOFTIRQ1,
1209 + KM_CLEARPAGE,
1210 KM_TYPE_NR
1211 };
1212
1213 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1214 index 385fd30..6c3d97e 100644
1215 --- a/arch/frv/mm/elf-fdpic.c
1216 +++ b/arch/frv/mm/elf-fdpic.c
1217 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1218 if (addr) {
1219 addr = PAGE_ALIGN(addr);
1220 vma = find_vma(current->mm, addr);
1221 - if (TASK_SIZE - len >= addr &&
1222 - (!vma || addr + len <= vma->vm_start))
1223 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1224 goto success;
1225 }
1226
1227 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1228 for (; vma; vma = vma->vm_next) {
1229 if (addr > limit)
1230 break;
1231 - if (addr + len <= vma->vm_start)
1232 + if (check_heap_stack_gap(vma, addr, len))
1233 goto success;
1234 addr = vma->vm_end;
1235 }
1236 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1237 for (; vma; vma = vma->vm_next) {
1238 if (addr > limit)
1239 break;
1240 - if (addr + len <= vma->vm_start)
1241 + if (check_heap_stack_gap(vma, addr, len))
1242 goto success;
1243 addr = vma->vm_end;
1244 }
1245 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
1246 index 3fad89e..3047da5 100644
1247 --- a/arch/ia64/include/asm/atomic.h
1248 +++ b/arch/ia64/include/asm/atomic.h
1249 @@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
1250 #define atomic64_inc(v) atomic64_add(1, (v))
1251 #define atomic64_dec(v) atomic64_sub(1, (v))
1252
1253 +#define atomic64_read_unchecked(v) atomic64_read(v)
1254 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1255 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1256 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1257 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1258 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1259 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1260 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1261 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1262 +
1263 /* Atomic operations are already serializing */
1264 #define smp_mb__before_atomic_dec() barrier()
1265 #define smp_mb__after_atomic_dec() barrier()
1266 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1267 index b5298eb..67c6e62 100644
1268 --- a/arch/ia64/include/asm/elf.h
1269 +++ b/arch/ia64/include/asm/elf.h
1270 @@ -42,6 +42,13 @@
1271 */
1272 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1273
1274 +#ifdef CONFIG_PAX_ASLR
1275 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1276 +
1277 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1278 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1279 +#endif
1280 +
1281 #define PT_IA_64_UNWIND 0x70000001
1282
1283 /* IA-64 relocations: */
1284 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1285 index 1a97af3..7529d31 100644
1286 --- a/arch/ia64/include/asm/pgtable.h
1287 +++ b/arch/ia64/include/asm/pgtable.h
1288 @@ -12,7 +12,7 @@
1289 * David Mosberger-Tang <davidm@hpl.hp.com>
1290 */
1291
1292 -
1293 +#include <linux/const.h>
1294 #include <asm/mman.h>
1295 #include <asm/page.h>
1296 #include <asm/processor.h>
1297 @@ -143,6 +143,17 @@
1298 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1299 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1300 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1301 +
1302 +#ifdef CONFIG_PAX_PAGEEXEC
1303 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1304 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1305 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1306 +#else
1307 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1308 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1309 +# define PAGE_COPY_NOEXEC PAGE_COPY
1310 +#endif
1311 +
1312 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1313 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1314 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1315 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1316 index b77768d..e0795eb 100644
1317 --- a/arch/ia64/include/asm/spinlock.h
1318 +++ b/arch/ia64/include/asm/spinlock.h
1319 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1320 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1321
1322 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1323 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1324 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1325 }
1326
1327 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1328 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1329 index 449c8c0..432a3d2 100644
1330 --- a/arch/ia64/include/asm/uaccess.h
1331 +++ b/arch/ia64/include/asm/uaccess.h
1332 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1333 const void *__cu_from = (from); \
1334 long __cu_len = (n); \
1335 \
1336 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1337 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1338 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1339 __cu_len; \
1340 })
1341 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1342 long __cu_len = (n); \
1343 \
1344 __chk_user_ptr(__cu_from); \
1345 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1346 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1347 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1348 __cu_len; \
1349 })
1350 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1351 index 24603be..948052d 100644
1352 --- a/arch/ia64/kernel/module.c
1353 +++ b/arch/ia64/kernel/module.c
1354 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1355 void
1356 module_free (struct module *mod, void *module_region)
1357 {
1358 - if (mod && mod->arch.init_unw_table &&
1359 - module_region == mod->module_init) {
1360 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1361 unw_remove_unwind_table(mod->arch.init_unw_table);
1362 mod->arch.init_unw_table = NULL;
1363 }
1364 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1365 }
1366
1367 static inline int
1368 +in_init_rx (const struct module *mod, uint64_t addr)
1369 +{
1370 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1371 +}
1372 +
1373 +static inline int
1374 +in_init_rw (const struct module *mod, uint64_t addr)
1375 +{
1376 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1377 +}
1378 +
1379 +static inline int
1380 in_init (const struct module *mod, uint64_t addr)
1381 {
1382 - return addr - (uint64_t) mod->module_init < mod->init_size;
1383 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1384 +}
1385 +
1386 +static inline int
1387 +in_core_rx (const struct module *mod, uint64_t addr)
1388 +{
1389 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1390 +}
1391 +
1392 +static inline int
1393 +in_core_rw (const struct module *mod, uint64_t addr)
1394 +{
1395 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1396 }
1397
1398 static inline int
1399 in_core (const struct module *mod, uint64_t addr)
1400 {
1401 - return addr - (uint64_t) mod->module_core < mod->core_size;
1402 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1403 }
1404
1405 static inline int
1406 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1407 break;
1408
1409 case RV_BDREL:
1410 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1411 + if (in_init_rx(mod, val))
1412 + val -= (uint64_t) mod->module_init_rx;
1413 + else if (in_init_rw(mod, val))
1414 + val -= (uint64_t) mod->module_init_rw;
1415 + else if (in_core_rx(mod, val))
1416 + val -= (uint64_t) mod->module_core_rx;
1417 + else if (in_core_rw(mod, val))
1418 + val -= (uint64_t) mod->module_core_rw;
1419 break;
1420
1421 case RV_LTV:
1422 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1423 * addresses have been selected...
1424 */
1425 uint64_t gp;
1426 - if (mod->core_size > MAX_LTOFF)
1427 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1428 /*
1429 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1430 * at the end of the module.
1431 */
1432 - gp = mod->core_size - MAX_LTOFF / 2;
1433 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1434 else
1435 - gp = mod->core_size / 2;
1436 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1437 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1438 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1439 mod->arch.gp = gp;
1440 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1441 }
1442 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1443 index 609d500..7dde2a8 100644
1444 --- a/arch/ia64/kernel/sys_ia64.c
1445 +++ b/arch/ia64/kernel/sys_ia64.c
1446 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1447 if (REGION_NUMBER(addr) == RGN_HPAGE)
1448 addr = 0;
1449 #endif
1450 +
1451 +#ifdef CONFIG_PAX_RANDMMAP
1452 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1453 + addr = mm->free_area_cache;
1454 + else
1455 +#endif
1456 +
1457 if (!addr)
1458 addr = mm->free_area_cache;
1459
1460 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1461 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1462 /* At this point: (!vma || addr < vma->vm_end). */
1463 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1464 - if (start_addr != TASK_UNMAPPED_BASE) {
1465 + if (start_addr != mm->mmap_base) {
1466 /* Start a new search --- just in case we missed some holes. */
1467 - addr = TASK_UNMAPPED_BASE;
1468 + addr = mm->mmap_base;
1469 goto full_search;
1470 }
1471 return -ENOMEM;
1472 }
1473 - if (!vma || addr + len <= vma->vm_start) {
1474 + if (check_heap_stack_gap(vma, addr, len)) {
1475 /* Remember the address where we stopped this search: */
1476 mm->free_area_cache = addr + len;
1477 return addr;
1478 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1479 index 53c0ba0..2accdde 100644
1480 --- a/arch/ia64/kernel/vmlinux.lds.S
1481 +++ b/arch/ia64/kernel/vmlinux.lds.S
1482 @@ -199,7 +199,7 @@ SECTIONS {
1483 /* Per-cpu data: */
1484 . = ALIGN(PERCPU_PAGE_SIZE);
1485 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1486 - __phys_per_cpu_start = __per_cpu_load;
1487 + __phys_per_cpu_start = per_cpu_load;
1488 /*
1489 * ensure percpu data fits
1490 * into percpu page size
1491 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1492 index 20b3593..1ce77f0 100644
1493 --- a/arch/ia64/mm/fault.c
1494 +++ b/arch/ia64/mm/fault.c
1495 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1496 return pte_present(pte);
1497 }
1498
1499 +#ifdef CONFIG_PAX_PAGEEXEC
1500 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1501 +{
1502 + unsigned long i;
1503 +
1504 + printk(KERN_ERR "PAX: bytes at PC: ");
1505 + for (i = 0; i < 8; i++) {
1506 + unsigned int c;
1507 + if (get_user(c, (unsigned int *)pc+i))
1508 + printk(KERN_CONT "???????? ");
1509 + else
1510 + printk(KERN_CONT "%08x ", c);
1511 + }
1512 + printk("\n");
1513 +}
1514 +#endif
1515 +
1516 void __kprobes
1517 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1518 {
1519 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1520 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1521 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1522
1523 - if ((vma->vm_flags & mask) != mask)
1524 + if ((vma->vm_flags & mask) != mask) {
1525 +
1526 +#ifdef CONFIG_PAX_PAGEEXEC
1527 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1528 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1529 + goto bad_area;
1530 +
1531 + up_read(&mm->mmap_sem);
1532 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1533 + do_group_exit(SIGKILL);
1534 + }
1535 +#endif
1536 +
1537 goto bad_area;
1538
1539 + }
1540 +
1541 /*
1542 * If for any reason at all we couldn't handle the fault, make
1543 * sure we exit gracefully rather than endlessly redo the
1544 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1545 index 5ca674b..e0e1b70 100644
1546 --- a/arch/ia64/mm/hugetlbpage.c
1547 +++ b/arch/ia64/mm/hugetlbpage.c
1548 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1549 /* At this point: (!vmm || addr < vmm->vm_end). */
1550 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1551 return -ENOMEM;
1552 - if (!vmm || (addr + len) <= vmm->vm_start)
1553 + if (check_heap_stack_gap(vmm, addr, len))
1554 return addr;
1555 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1556 }
1557 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1558 index 00cb0e2..2ad8024 100644
1559 --- a/arch/ia64/mm/init.c
1560 +++ b/arch/ia64/mm/init.c
1561 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1562 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1563 vma->vm_end = vma->vm_start + PAGE_SIZE;
1564 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1565 +
1566 +#ifdef CONFIG_PAX_PAGEEXEC
1567 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1568 + vma->vm_flags &= ~VM_EXEC;
1569 +
1570 +#ifdef CONFIG_PAX_MPROTECT
1571 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1572 + vma->vm_flags &= ~VM_MAYEXEC;
1573 +#endif
1574 +
1575 + }
1576 +#endif
1577 +
1578 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1579 down_write(&current->mm->mmap_sem);
1580 if (insert_vm_struct(current->mm, vma)) {
1581 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1582 index 82abd15..d95ae5d 100644
1583 --- a/arch/m32r/lib/usercopy.c
1584 +++ b/arch/m32r/lib/usercopy.c
1585 @@ -14,6 +14,9 @@
1586 unsigned long
1587 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1588 {
1589 + if ((long)n < 0)
1590 + return n;
1591 +
1592 prefetch(from);
1593 if (access_ok(VERIFY_WRITE, to, n))
1594 __copy_user(to,from,n);
1595 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1596 unsigned long
1597 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1598 {
1599 + if ((long)n < 0)
1600 + return n;
1601 +
1602 prefetchw(to);
1603 if (access_ok(VERIFY_READ, from, n))
1604 __copy_user_zeroing(to,from,n);
1605 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
1606 index 1d93f81..67794d0 100644
1607 --- a/arch/mips/include/asm/atomic.h
1608 +++ b/arch/mips/include/asm/atomic.h
1609 @@ -21,6 +21,10 @@
1610 #include <asm/war.h>
1611 #include <asm/system.h>
1612
1613 +#ifdef CONFIG_GENERIC_ATOMIC64
1614 +#include <asm-generic/atomic64.h>
1615 +#endif
1616 +
1617 #define ATOMIC_INIT(i) { (i) }
1618
1619 /*
1620 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
1621 */
1622 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
1623
1624 +#define atomic64_read_unchecked(v) atomic64_read(v)
1625 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1626 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1627 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1628 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1629 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1630 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1631 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1632 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1633 +
1634 #endif /* CONFIG_64BIT */
1635
1636 /*
1637 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1638 index 455c0ac..ad65fbe 100644
1639 --- a/arch/mips/include/asm/elf.h
1640 +++ b/arch/mips/include/asm/elf.h
1641 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1642 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1643 #endif
1644
1645 +#ifdef CONFIG_PAX_ASLR
1646 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1647 +
1648 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1649 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1650 +#endif
1651 +
1652 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1653 struct linux_binprm;
1654 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1655 int uses_interp);
1656
1657 -struct mm_struct;
1658 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1659 -#define arch_randomize_brk arch_randomize_brk
1660 -
1661 #endif /* _ASM_ELF_H */
1662 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1663 index e59cd1a..8e329d6 100644
1664 --- a/arch/mips/include/asm/page.h
1665 +++ b/arch/mips/include/asm/page.h
1666 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1667 #ifdef CONFIG_CPU_MIPS32
1668 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1669 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1670 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1671 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1672 #else
1673 typedef struct { unsigned long long pte; } pte_t;
1674 #define pte_val(x) ((x).pte)
1675 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1676 index 6018c80..7c37203 100644
1677 --- a/arch/mips/include/asm/system.h
1678 +++ b/arch/mips/include/asm/system.h
1679 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1680 */
1681 #define __ARCH_WANT_UNLOCKED_CTXSW
1682
1683 -extern unsigned long arch_align_stack(unsigned long sp);
1684 +#define arch_align_stack(x) ((x) & ~0xfUL)
1685
1686 #endif /* _ASM_SYSTEM_H */
1687 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1688 index 9fdd8bc..4bd7f1a 100644
1689 --- a/arch/mips/kernel/binfmt_elfn32.c
1690 +++ b/arch/mips/kernel/binfmt_elfn32.c
1691 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1692 #undef ELF_ET_DYN_BASE
1693 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1694
1695 +#ifdef CONFIG_PAX_ASLR
1696 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1697 +
1698 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1699 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1700 +#endif
1701 +
1702 #include <asm/processor.h>
1703 #include <linux/module.h>
1704 #include <linux/elfcore.h>
1705 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1706 index ff44823..97f8906 100644
1707 --- a/arch/mips/kernel/binfmt_elfo32.c
1708 +++ b/arch/mips/kernel/binfmt_elfo32.c
1709 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1710 #undef ELF_ET_DYN_BASE
1711 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1712
1713 +#ifdef CONFIG_PAX_ASLR
1714 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1715 +
1716 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1717 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1718 +#endif
1719 +
1720 #include <asm/processor.h>
1721
1722 /*
1723 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1724 index c47f96e..661d418 100644
1725 --- a/arch/mips/kernel/process.c
1726 +++ b/arch/mips/kernel/process.c
1727 @@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1728 out:
1729 return pc;
1730 }
1731 -
1732 -/*
1733 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1734 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1735 - */
1736 -unsigned long arch_align_stack(unsigned long sp)
1737 -{
1738 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1739 - sp -= get_random_int() & ~PAGE_MASK;
1740 -
1741 - return sp & ALMASK;
1742 -}
1743 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1744 index 937cf33..adb39bb 100644
1745 --- a/arch/mips/mm/fault.c
1746 +++ b/arch/mips/mm/fault.c
1747 @@ -28,6 +28,23 @@
1748 #include <asm/highmem.h> /* For VMALLOC_END */
1749 #include <linux/kdebug.h>
1750
1751 +#ifdef CONFIG_PAX_PAGEEXEC
1752 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1753 +{
1754 + unsigned long i;
1755 +
1756 + printk(KERN_ERR "PAX: bytes at PC: ");
1757 + for (i = 0; i < 5; i++) {
1758 + unsigned int c;
1759 + if (get_user(c, (unsigned int *)pc+i))
1760 + printk(KERN_CONT "???????? ");
1761 + else
1762 + printk(KERN_CONT "%08x ", c);
1763 + }
1764 + printk("\n");
1765 +}
1766 +#endif
1767 +
1768 /*
1769 * This routine handles page faults. It determines the address,
1770 * and the problem, and then passes it off to one of the appropriate
1771 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1772 index 302d779..7d35bf8 100644
1773 --- a/arch/mips/mm/mmap.c
1774 +++ b/arch/mips/mm/mmap.c
1775 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1776 do_color_align = 1;
1777
1778 /* requesting a specific address */
1779 +
1780 +#ifdef CONFIG_PAX_RANDMMAP
1781 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1782 +#endif
1783 +
1784 if (addr) {
1785 if (do_color_align)
1786 addr = COLOUR_ALIGN(addr, pgoff);
1787 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1788 addr = PAGE_ALIGN(addr);
1789
1790 vma = find_vma(mm, addr);
1791 - if (TASK_SIZE - len >= addr &&
1792 - (!vma || addr + len <= vma->vm_start))
1793 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1794 return addr;
1795 }
1796
1797 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1798 /* At this point: (!vma || addr < vma->vm_end). */
1799 if (TASK_SIZE - len < addr)
1800 return -ENOMEM;
1801 - if (!vma || addr + len <= vma->vm_start)
1802 + if (check_heap_stack_gap(vmm, addr, len))
1803 return addr;
1804 addr = vma->vm_end;
1805 if (do_color_align)
1806 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1807 /* make sure it can fit in the remaining address space */
1808 if (likely(addr > len)) {
1809 vma = find_vma(mm, addr - len);
1810 - if (!vma || addr <= vma->vm_start) {
1811 + if (check_heap_stack_gap(vmm, addr - len, len))
1812 /* cache the address as a hint for next time */
1813 return mm->free_area_cache = addr - len;
1814 }
1815 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1816 * return with success:
1817 */
1818 vma = find_vma(mm, addr);
1819 - if (likely(!vma || addr + len <= vma->vm_start)) {
1820 + if (check_heap_stack_gap(vmm, addr, len)) {
1821 /* cache the address as a hint for next time */
1822 return mm->free_area_cache = addr;
1823 }
1824 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1825 mm->unmap_area = arch_unmap_area_topdown;
1826 }
1827 }
1828 -
1829 -static inline unsigned long brk_rnd(void)
1830 -{
1831 - unsigned long rnd = get_random_int();
1832 -
1833 - rnd = rnd << PAGE_SHIFT;
1834 - /* 8MB for 32bit, 256MB for 64bit */
1835 - if (TASK_IS_32BIT_ADDR)
1836 - rnd = rnd & 0x7ffffful;
1837 - else
1838 - rnd = rnd & 0xffffffful;
1839 -
1840 - return rnd;
1841 -}
1842 -
1843 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1844 -{
1845 - unsigned long base = mm->brk;
1846 - unsigned long ret;
1847 -
1848 - ret = PAGE_ALIGN(base + brk_rnd());
1849 -
1850 - if (ret < mm->brk)
1851 - return mm->brk;
1852 -
1853 - return ret;
1854 -}
1855 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
1856 index 4054b31..a10c105 100644
1857 --- a/arch/parisc/include/asm/atomic.h
1858 +++ b/arch/parisc/include/asm/atomic.h
1859 @@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
1860
1861 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
1862
1863 +#define atomic64_read_unchecked(v) atomic64_read(v)
1864 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1865 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1866 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1867 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1868 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1869 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1870 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1871 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1872 +
1873 #endif /* !CONFIG_64BIT */
1874
1875
1876 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1877 index 19f6cb1..6c78cf2 100644
1878 --- a/arch/parisc/include/asm/elf.h
1879 +++ b/arch/parisc/include/asm/elf.h
1880 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1881
1882 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1883
1884 +#ifdef CONFIG_PAX_ASLR
1885 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1886 +
1887 +#define PAX_DELTA_MMAP_LEN 16
1888 +#define PAX_DELTA_STACK_LEN 16
1889 +#endif
1890 +
1891 /* This yields a mask that user programs can use to figure out what
1892 instruction set this CPU supports. This could be done in user space,
1893 but it's not easy, and we've already done it here. */
1894 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1895 index 22dadeb..f6c2be4 100644
1896 --- a/arch/parisc/include/asm/pgtable.h
1897 +++ b/arch/parisc/include/asm/pgtable.h
1898 @@ -210,6 +210,17 @@ struct vm_area_struct;
1899 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1900 #define PAGE_COPY PAGE_EXECREAD
1901 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1902 +
1903 +#ifdef CONFIG_PAX_PAGEEXEC
1904 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1905 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1906 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1907 +#else
1908 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1909 +# define PAGE_COPY_NOEXEC PAGE_COPY
1910 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1911 +#endif
1912 +
1913 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1914 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1915 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1916 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1917 index 5e34ccf..672bc9c 100644
1918 --- a/arch/parisc/kernel/module.c
1919 +++ b/arch/parisc/kernel/module.c
1920 @@ -98,16 +98,38 @@
1921
1922 /* three functions to determine where in the module core
1923 * or init pieces the location is */
1924 +static inline int in_init_rx(struct module *me, void *loc)
1925 +{
1926 + return (loc >= me->module_init_rx &&
1927 + loc < (me->module_init_rx + me->init_size_rx));
1928 +}
1929 +
1930 +static inline int in_init_rw(struct module *me, void *loc)
1931 +{
1932 + return (loc >= me->module_init_rw &&
1933 + loc < (me->module_init_rw + me->init_size_rw));
1934 +}
1935 +
1936 static inline int in_init(struct module *me, void *loc)
1937 {
1938 - return (loc >= me->module_init &&
1939 - loc <= (me->module_init + me->init_size));
1940 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1941 +}
1942 +
1943 +static inline int in_core_rx(struct module *me, void *loc)
1944 +{
1945 + return (loc >= me->module_core_rx &&
1946 + loc < (me->module_core_rx + me->core_size_rx));
1947 +}
1948 +
1949 +static inline int in_core_rw(struct module *me, void *loc)
1950 +{
1951 + return (loc >= me->module_core_rw &&
1952 + loc < (me->module_core_rw + me->core_size_rw));
1953 }
1954
1955 static inline int in_core(struct module *me, void *loc)
1956 {
1957 - return (loc >= me->module_core &&
1958 - loc <= (me->module_core + me->core_size));
1959 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1960 }
1961
1962 static inline int in_local(struct module *me, void *loc)
1963 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1964 }
1965
1966 /* align things a bit */
1967 - me->core_size = ALIGN(me->core_size, 16);
1968 - me->arch.got_offset = me->core_size;
1969 - me->core_size += gots * sizeof(struct got_entry);
1970 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1971 + me->arch.got_offset = me->core_size_rw;
1972 + me->core_size_rw += gots * sizeof(struct got_entry);
1973
1974 - me->core_size = ALIGN(me->core_size, 16);
1975 - me->arch.fdesc_offset = me->core_size;
1976 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1977 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1978 + me->arch.fdesc_offset = me->core_size_rw;
1979 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1980
1981 me->arch.got_max = gots;
1982 me->arch.fdesc_max = fdescs;
1983 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1984
1985 BUG_ON(value == 0);
1986
1987 - got = me->module_core + me->arch.got_offset;
1988 + got = me->module_core_rw + me->arch.got_offset;
1989 for (i = 0; got[i].addr; i++)
1990 if (got[i].addr == value)
1991 goto out;
1992 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1993 #ifdef CONFIG_64BIT
1994 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1995 {
1996 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1997 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1998
1999 if (!value) {
2000 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2001 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2002
2003 /* Create new one */
2004 fdesc->addr = value;
2005 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2006 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2007 return (Elf_Addr)fdesc;
2008 }
2009 #endif /* CONFIG_64BIT */
2010 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
2011
2012 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2013 end = table + sechdrs[me->arch.unwind_section].sh_size;
2014 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2015 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2016
2017 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2018 me->arch.unwind_section, table, end, gp);
2019 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2020 index c9b9322..02d8940 100644
2021 --- a/arch/parisc/kernel/sys_parisc.c
2022 +++ b/arch/parisc/kernel/sys_parisc.c
2023 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2024 /* At this point: (!vma || addr < vma->vm_end). */
2025 if (TASK_SIZE - len < addr)
2026 return -ENOMEM;
2027 - if (!vma || addr + len <= vma->vm_start)
2028 + if (check_heap_stack_gap(vma, addr, len))
2029 return addr;
2030 addr = vma->vm_end;
2031 }
2032 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2033 /* At this point: (!vma || addr < vma->vm_end). */
2034 if (TASK_SIZE - len < addr)
2035 return -ENOMEM;
2036 - if (!vma || addr + len <= vma->vm_start)
2037 + if (check_heap_stack_gap(vma, addr, len))
2038 return addr;
2039 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2040 if (addr < vma->vm_end) /* handle wraparound */
2041 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2042 if (flags & MAP_FIXED)
2043 return addr;
2044 if (!addr)
2045 - addr = TASK_UNMAPPED_BASE;
2046 + addr = current->mm->mmap_base;
2047
2048 if (filp) {
2049 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2050 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2051 index f19e660..414fe24 100644
2052 --- a/arch/parisc/kernel/traps.c
2053 +++ b/arch/parisc/kernel/traps.c
2054 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2055
2056 down_read(&current->mm->mmap_sem);
2057 vma = find_vma(current->mm,regs->iaoq[0]);
2058 - if (vma && (regs->iaoq[0] >= vma->vm_start)
2059 - && (vma->vm_flags & VM_EXEC)) {
2060 -
2061 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2062 fault_address = regs->iaoq[0];
2063 fault_space = regs->iasq[0];
2064
2065 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2066 index 18162ce..94de376 100644
2067 --- a/arch/parisc/mm/fault.c
2068 +++ b/arch/parisc/mm/fault.c
2069 @@ -15,6 +15,7 @@
2070 #include <linux/sched.h>
2071 #include <linux/interrupt.h>
2072 #include <linux/module.h>
2073 +#include <linux/unistd.h>
2074
2075 #include <asm/uaccess.h>
2076 #include <asm/traps.h>
2077 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2078 static unsigned long
2079 parisc_acctyp(unsigned long code, unsigned int inst)
2080 {
2081 - if (code == 6 || code == 16)
2082 + if (code == 6 || code == 7 || code == 16)
2083 return VM_EXEC;
2084
2085 switch (inst & 0xf0000000) {
2086 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2087 }
2088 #endif
2089
2090 +#ifdef CONFIG_PAX_PAGEEXEC
2091 +/*
2092 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2093 + *
2094 + * returns 1 when task should be killed
2095 + * 2 when rt_sigreturn trampoline was detected
2096 + * 3 when unpatched PLT trampoline was detected
2097 + */
2098 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2099 +{
2100 +
2101 +#ifdef CONFIG_PAX_EMUPLT
2102 + int err;
2103 +
2104 + do { /* PaX: unpatched PLT emulation */
2105 + unsigned int bl, depwi;
2106 +
2107 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2108 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2109 +
2110 + if (err)
2111 + break;
2112 +
2113 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2114 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2115 +
2116 + err = get_user(ldw, (unsigned int *)addr);
2117 + err |= get_user(bv, (unsigned int *)(addr+4));
2118 + err |= get_user(ldw2, (unsigned int *)(addr+8));
2119 +
2120 + if (err)
2121 + break;
2122 +
2123 + if (ldw == 0x0E801096U &&
2124 + bv == 0xEAC0C000U &&
2125 + ldw2 == 0x0E881095U)
2126 + {
2127 + unsigned int resolver, map;
2128 +
2129 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2130 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2131 + if (err)
2132 + break;
2133 +
2134 + regs->gr[20] = instruction_pointer(regs)+8;
2135 + regs->gr[21] = map;
2136 + regs->gr[22] = resolver;
2137 + regs->iaoq[0] = resolver | 3UL;
2138 + regs->iaoq[1] = regs->iaoq[0] + 4;
2139 + return 3;
2140 + }
2141 + }
2142 + } while (0);
2143 +#endif
2144 +
2145 +#ifdef CONFIG_PAX_EMUTRAMP
2146 +
2147 +#ifndef CONFIG_PAX_EMUSIGRT
2148 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2149 + return 1;
2150 +#endif
2151 +
2152 + do { /* PaX: rt_sigreturn emulation */
2153 + unsigned int ldi1, ldi2, bel, nop;
2154 +
2155 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2156 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2157 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2158 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2159 +
2160 + if (err)
2161 + break;
2162 +
2163 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2164 + ldi2 == 0x3414015AU &&
2165 + bel == 0xE4008200U &&
2166 + nop == 0x08000240U)
2167 + {
2168 + regs->gr[25] = (ldi1 & 2) >> 1;
2169 + regs->gr[20] = __NR_rt_sigreturn;
2170 + regs->gr[31] = regs->iaoq[1] + 16;
2171 + regs->sr[0] = regs->iasq[1];
2172 + regs->iaoq[0] = 0x100UL;
2173 + regs->iaoq[1] = regs->iaoq[0] + 4;
2174 + regs->iasq[0] = regs->sr[2];
2175 + regs->iasq[1] = regs->sr[2];
2176 + return 2;
2177 + }
2178 + } while (0);
2179 +#endif
2180 +
2181 + return 1;
2182 +}
2183 +
2184 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2185 +{
2186 + unsigned long i;
2187 +
2188 + printk(KERN_ERR "PAX: bytes at PC: ");
2189 + for (i = 0; i < 5; i++) {
2190 + unsigned int c;
2191 + if (get_user(c, (unsigned int *)pc+i))
2192 + printk(KERN_CONT "???????? ");
2193 + else
2194 + printk(KERN_CONT "%08x ", c);
2195 + }
2196 + printk("\n");
2197 +}
2198 +#endif
2199 +
2200 int fixup_exception(struct pt_regs *regs)
2201 {
2202 const struct exception_table_entry *fix;
2203 @@ -192,8 +303,33 @@ good_area:
2204
2205 acc_type = parisc_acctyp(code,regs->iir);
2206
2207 - if ((vma->vm_flags & acc_type) != acc_type)
2208 + if ((vma->vm_flags & acc_type) != acc_type) {
2209 +
2210 +#ifdef CONFIG_PAX_PAGEEXEC
2211 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2212 + (address & ~3UL) == instruction_pointer(regs))
2213 + {
2214 + up_read(&mm->mmap_sem);
2215 + switch (pax_handle_fetch_fault(regs)) {
2216 +
2217 +#ifdef CONFIG_PAX_EMUPLT
2218 + case 3:
2219 + return;
2220 +#endif
2221 +
2222 +#ifdef CONFIG_PAX_EMUTRAMP
2223 + case 2:
2224 + return;
2225 +#endif
2226 +
2227 + }
2228 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2229 + do_group_exit(SIGKILL);
2230 + }
2231 +#endif
2232 +
2233 goto bad_area;
2234 + }
2235
2236 /*
2237 * If for any reason at all we couldn't handle the fault, make
2238 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
2239 index 02e41b5..ec6e26c 100644
2240 --- a/arch/powerpc/include/asm/atomic.h
2241 +++ b/arch/powerpc/include/asm/atomic.h
2242 @@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2243
2244 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2245
2246 +#define atomic64_read_unchecked(v) atomic64_read(v)
2247 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2248 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2249 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2250 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2251 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2252 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2253 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2254 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2255 +
2256 #endif /* __powerpc64__ */
2257
2258 #endif /* __KERNEL__ */
2259 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2260 index 3bf9cca..e7457d0 100644
2261 --- a/arch/powerpc/include/asm/elf.h
2262 +++ b/arch/powerpc/include/asm/elf.h
2263 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2264 the loader. We need to make sure that it is out of the way of the program
2265 that it will "exec", and that there is sufficient room for the brk. */
2266
2267 -extern unsigned long randomize_et_dyn(unsigned long base);
2268 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2269 +#define ELF_ET_DYN_BASE (0x20000000)
2270 +
2271 +#ifdef CONFIG_PAX_ASLR
2272 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2273 +
2274 +#ifdef __powerpc64__
2275 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2276 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2277 +#else
2278 +#define PAX_DELTA_MMAP_LEN 15
2279 +#define PAX_DELTA_STACK_LEN 15
2280 +#endif
2281 +#endif
2282
2283 /*
2284 * Our registers are always unsigned longs, whether we're a 32 bit
2285 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2286 (0x7ff >> (PAGE_SHIFT - 12)) : \
2287 (0x3ffff >> (PAGE_SHIFT - 12)))
2288
2289 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2290 -#define arch_randomize_brk arch_randomize_brk
2291 -
2292 #endif /* __KERNEL__ */
2293
2294 /*
2295 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2296 index bca8fdc..61e9580 100644
2297 --- a/arch/powerpc/include/asm/kmap_types.h
2298 +++ b/arch/powerpc/include/asm/kmap_types.h
2299 @@ -27,6 +27,7 @@ enum km_type {
2300 KM_PPC_SYNC_PAGE,
2301 KM_PPC_SYNC_ICACHE,
2302 KM_KDB,
2303 + KM_CLEARPAGE,
2304 KM_TYPE_NR
2305 };
2306
2307 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2308 index d4a7f64..451de1c 100644
2309 --- a/arch/powerpc/include/asm/mman.h
2310 +++ b/arch/powerpc/include/asm/mman.h
2311 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2312 }
2313 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2314
2315 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2316 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2317 {
2318 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2319 }
2320 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2321 index dd9c4fd..a2ced87 100644
2322 --- a/arch/powerpc/include/asm/page.h
2323 +++ b/arch/powerpc/include/asm/page.h
2324 @@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
2325 * and needs to be executable. This means the whole heap ends
2326 * up being executable.
2327 */
2328 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2329 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2330 +#define VM_DATA_DEFAULT_FLAGS32 \
2331 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2332 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2333
2334 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2335 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2336 @@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
2337 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2338 #endif
2339
2340 +#define ktla_ktva(addr) (addr)
2341 +#define ktva_ktla(addr) (addr)
2342 +
2343 /*
2344 * Use the top bit of the higher-level page table entries to indicate whether
2345 * the entries we point to contain hugepages. This works because we know that
2346 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2347 index fb40ede..d3ce956 100644
2348 --- a/arch/powerpc/include/asm/page_64.h
2349 +++ b/arch/powerpc/include/asm/page_64.h
2350 @@ -144,15 +144,18 @@ do { \
2351 * stack by default, so in the absence of a PT_GNU_STACK program header
2352 * we turn execute permission off.
2353 */
2354 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2355 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2356 +#define VM_STACK_DEFAULT_FLAGS32 \
2357 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2358 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2359
2360 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2361 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2362
2363 +#ifndef CONFIG_PAX_PAGEEXEC
2364 #define VM_STACK_DEFAULT_FLAGS \
2365 (is_32bit_task() ? \
2366 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2367 +#endif
2368
2369 #include <asm-generic/getorder.h>
2370
2371 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2372 index 88b0bd9..e32bc67 100644
2373 --- a/arch/powerpc/include/asm/pgtable.h
2374 +++ b/arch/powerpc/include/asm/pgtable.h
2375 @@ -2,6 +2,7 @@
2376 #define _ASM_POWERPC_PGTABLE_H
2377 #ifdef __KERNEL__
2378
2379 +#include <linux/const.h>
2380 #ifndef __ASSEMBLY__
2381 #include <asm/processor.h> /* For TASK_SIZE */
2382 #include <asm/mmu.h>
2383 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2384 index 4aad413..85d86bf 100644
2385 --- a/arch/powerpc/include/asm/pte-hash32.h
2386 +++ b/arch/powerpc/include/asm/pte-hash32.h
2387 @@ -21,6 +21,7 @@
2388 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2389 #define _PAGE_USER 0x004 /* usermode access allowed */
2390 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2391 +#define _PAGE_EXEC _PAGE_GUARDED
2392 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2393 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2394 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2395 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2396 index 559da19..7e5835c 100644
2397 --- a/arch/powerpc/include/asm/reg.h
2398 +++ b/arch/powerpc/include/asm/reg.h
2399 @@ -212,6 +212,7 @@
2400 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2401 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2402 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2403 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2404 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2405 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2406 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2407 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2408 index e30a13d..2b7d994 100644
2409 --- a/arch/powerpc/include/asm/system.h
2410 +++ b/arch/powerpc/include/asm/system.h
2411 @@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2412 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2413 #endif
2414
2415 -extern unsigned long arch_align_stack(unsigned long sp);
2416 +#define arch_align_stack(x) ((x) & ~0xfUL)
2417
2418 /* Used in very early kernel initialization. */
2419 extern unsigned long reloc_offset(void);
2420 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2421 index bd0fb84..a42a14b 100644
2422 --- a/arch/powerpc/include/asm/uaccess.h
2423 +++ b/arch/powerpc/include/asm/uaccess.h
2424 @@ -13,6 +13,8 @@
2425 #define VERIFY_READ 0
2426 #define VERIFY_WRITE 1
2427
2428 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2429 +
2430 /*
2431 * The fs value determines whether argument validity checking should be
2432 * performed or not. If get_fs() == USER_DS, checking is performed, with
2433 @@ -327,52 +329,6 @@ do { \
2434 extern unsigned long __copy_tofrom_user(void __user *to,
2435 const void __user *from, unsigned long size);
2436
2437 -#ifndef __powerpc64__
2438 -
2439 -static inline unsigned long copy_from_user(void *to,
2440 - const void __user *from, unsigned long n)
2441 -{
2442 - unsigned long over;
2443 -
2444 - if (access_ok(VERIFY_READ, from, n))
2445 - return __copy_tofrom_user((__force void __user *)to, from, n);
2446 - if ((unsigned long)from < TASK_SIZE) {
2447 - over = (unsigned long)from + n - TASK_SIZE;
2448 - return __copy_tofrom_user((__force void __user *)to, from,
2449 - n - over) + over;
2450 - }
2451 - return n;
2452 -}
2453 -
2454 -static inline unsigned long copy_to_user(void __user *to,
2455 - const void *from, unsigned long n)
2456 -{
2457 - unsigned long over;
2458 -
2459 - if (access_ok(VERIFY_WRITE, to, n))
2460 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2461 - if ((unsigned long)to < TASK_SIZE) {
2462 - over = (unsigned long)to + n - TASK_SIZE;
2463 - return __copy_tofrom_user(to, (__force void __user *)from,
2464 - n - over) + over;
2465 - }
2466 - return n;
2467 -}
2468 -
2469 -#else /* __powerpc64__ */
2470 -
2471 -#define __copy_in_user(to, from, size) \
2472 - __copy_tofrom_user((to), (from), (size))
2473 -
2474 -extern unsigned long copy_from_user(void *to, const void __user *from,
2475 - unsigned long n);
2476 -extern unsigned long copy_to_user(void __user *to, const void *from,
2477 - unsigned long n);
2478 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2479 - unsigned long n);
2480 -
2481 -#endif /* __powerpc64__ */
2482 -
2483 static inline unsigned long __copy_from_user_inatomic(void *to,
2484 const void __user *from, unsigned long n)
2485 {
2486 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2487 if (ret == 0)
2488 return 0;
2489 }
2490 +
2491 + if (!__builtin_constant_p(n))
2492 + check_object_size(to, n, false);
2493 +
2494 return __copy_tofrom_user((__force void __user *)to, from, n);
2495 }
2496
2497 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2498 if (ret == 0)
2499 return 0;
2500 }
2501 +
2502 + if (!__builtin_constant_p(n))
2503 + check_object_size(from, n, true);
2504 +
2505 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2506 }
2507
2508 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2509 return __copy_to_user_inatomic(to, from, size);
2510 }
2511
2512 +#ifndef __powerpc64__
2513 +
2514 +static inline unsigned long __must_check copy_from_user(void *to,
2515 + const void __user *from, unsigned long n)
2516 +{
2517 + unsigned long over;
2518 +
2519 + if ((long)n < 0)
2520 + return n;
2521 +
2522 + if (access_ok(VERIFY_READ, from, n)) {
2523 + if (!__builtin_constant_p(n))
2524 + check_object_size(to, n, false);
2525 + return __copy_tofrom_user((__force void __user *)to, from, n);
2526 + }
2527 + if ((unsigned long)from < TASK_SIZE) {
2528 + over = (unsigned long)from + n - TASK_SIZE;
2529 + if (!__builtin_constant_p(n - over))
2530 + check_object_size(to, n - over, false);
2531 + return __copy_tofrom_user((__force void __user *)to, from,
2532 + n - over) + over;
2533 + }
2534 + return n;
2535 +}
2536 +
2537 +static inline unsigned long __must_check copy_to_user(void __user *to,
2538 + const void *from, unsigned long n)
2539 +{
2540 + unsigned long over;
2541 +
2542 + if ((long)n < 0)
2543 + return n;
2544 +
2545 + if (access_ok(VERIFY_WRITE, to, n)) {
2546 + if (!__builtin_constant_p(n))
2547 + check_object_size(from, n, true);
2548 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2549 + }
2550 + if ((unsigned long)to < TASK_SIZE) {
2551 + over = (unsigned long)to + n - TASK_SIZE;
2552 + if (!__builtin_constant_p(n))
2553 + check_object_size(from, n - over, true);
2554 + return __copy_tofrom_user(to, (__force void __user *)from,
2555 + n - over) + over;
2556 + }
2557 + return n;
2558 +}
2559 +
2560 +#else /* __powerpc64__ */
2561 +
2562 +#define __copy_in_user(to, from, size) \
2563 + __copy_tofrom_user((to), (from), (size))
2564 +
2565 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2566 +{
2567 + if ((long)n < 0 || n > INT_MAX)
2568 + return n;
2569 +
2570 + if (!__builtin_constant_p(n))
2571 + check_object_size(to, n, false);
2572 +
2573 + if (likely(access_ok(VERIFY_READ, from, n)))
2574 + n = __copy_from_user(to, from, n);
2575 + else
2576 + memset(to, 0, n);
2577 + return n;
2578 +}
2579 +
2580 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2581 +{
2582 + if ((long)n < 0 || n > INT_MAX)
2583 + return n;
2584 +
2585 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2586 + if (!__builtin_constant_p(n))
2587 + check_object_size(from, n, true);
2588 + n = __copy_to_user(to, from, n);
2589 + }
2590 + return n;
2591 +}
2592 +
2593 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2594 + unsigned long n);
2595 +
2596 +#endif /* __powerpc64__ */
2597 +
2598 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2599
2600 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2601 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2602 index 429983c..7af363b 100644
2603 --- a/arch/powerpc/kernel/exceptions-64e.S
2604 +++ b/arch/powerpc/kernel/exceptions-64e.S
2605 @@ -587,6 +587,7 @@ storage_fault_common:
2606 std r14,_DAR(r1)
2607 std r15,_DSISR(r1)
2608 addi r3,r1,STACK_FRAME_OVERHEAD
2609 + bl .save_nvgprs
2610 mr r4,r14
2611 mr r5,r15
2612 ld r14,PACA_EXGEN+EX_R14(r13)
2613 @@ -596,8 +597,7 @@ storage_fault_common:
2614 cmpdi r3,0
2615 bne- 1f
2616 b .ret_from_except_lite
2617 -1: bl .save_nvgprs
2618 - mr r5,r3
2619 +1: mr r5,r3
2620 addi r3,r1,STACK_FRAME_OVERHEAD
2621 ld r4,_DAR(r1)
2622 bl .bad_page_fault
2623 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2624 index cf9c69b..ebc9640 100644
2625 --- a/arch/powerpc/kernel/exceptions-64s.S
2626 +++ b/arch/powerpc/kernel/exceptions-64s.S
2627 @@ -1004,10 +1004,10 @@ handle_page_fault:
2628 11: ld r4,_DAR(r1)
2629 ld r5,_DSISR(r1)
2630 addi r3,r1,STACK_FRAME_OVERHEAD
2631 + bl .save_nvgprs
2632 bl .do_page_fault
2633 cmpdi r3,0
2634 beq+ 13f
2635 - bl .save_nvgprs
2636 mr r5,r3
2637 addi r3,r1,STACK_FRAME_OVERHEAD
2638 lwz r4,_DAR(r1)
2639 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2640 index 0b6d796..d760ddb 100644
2641 --- a/arch/powerpc/kernel/module_32.c
2642 +++ b/arch/powerpc/kernel/module_32.c
2643 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2644 me->arch.core_plt_section = i;
2645 }
2646 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2647 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2648 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2649 return -ENOEXEC;
2650 }
2651
2652 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2653
2654 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2655 /* Init, or core PLT? */
2656 - if (location >= mod->module_core
2657 - && location < mod->module_core + mod->core_size)
2658 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2659 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2660 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2661 - else
2662 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2663 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2664 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2665 + else {
2666 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2667 + return ~0UL;
2668 + }
2669
2670 /* Find this entry, or if that fails, the next avail. entry */
2671 while (entry->jump[0]) {
2672 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2673 index 6457574..08b28d3 100644
2674 --- a/arch/powerpc/kernel/process.c
2675 +++ b/arch/powerpc/kernel/process.c
2676 @@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
2677 * Lookup NIP late so we have the best change of getting the
2678 * above info out without failing
2679 */
2680 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2681 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2682 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2683 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2684 #endif
2685 show_stack(current, (unsigned long *) regs->gpr[1]);
2686 if (!user_mode(regs))
2687 @@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2688 newsp = stack[0];
2689 ip = stack[STACK_FRAME_LR_SAVE];
2690 if (!firstframe || ip != lr) {
2691 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2692 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2693 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2694 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2695 - printk(" (%pS)",
2696 + printk(" (%pA)",
2697 (void *)current->ret_stack[curr_frame].ret);
2698 curr_frame--;
2699 }
2700 @@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2701 struct pt_regs *regs = (struct pt_regs *)
2702 (sp + STACK_FRAME_OVERHEAD);
2703 lr = regs->link;
2704 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2705 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2706 regs->trap, (void *)regs->nip, (void *)lr);
2707 firstframe = 1;
2708 }
2709 @@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
2710 }
2711
2712 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2713 -
2714 -unsigned long arch_align_stack(unsigned long sp)
2715 -{
2716 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2717 - sp -= get_random_int() & ~PAGE_MASK;
2718 - return sp & ~0xf;
2719 -}
2720 -
2721 -static inline unsigned long brk_rnd(void)
2722 -{
2723 - unsigned long rnd = 0;
2724 -
2725 - /* 8MB for 32bit, 1GB for 64bit */
2726 - if (is_32bit_task())
2727 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2728 - else
2729 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2730 -
2731 - return rnd << PAGE_SHIFT;
2732 -}
2733 -
2734 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2735 -{
2736 - unsigned long base = mm->brk;
2737 - unsigned long ret;
2738 -
2739 -#ifdef CONFIG_PPC_STD_MMU_64
2740 - /*
2741 - * If we are using 1TB segments and we are allowed to randomise
2742 - * the heap, we can put it above 1TB so it is backed by a 1TB
2743 - * segment. Otherwise the heap will be in the bottom 1TB
2744 - * which always uses 256MB segments and this may result in a
2745 - * performance penalty.
2746 - */
2747 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2748 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2749 -#endif
2750 -
2751 - ret = PAGE_ALIGN(base + brk_rnd());
2752 -
2753 - if (ret < mm->brk)
2754 - return mm->brk;
2755 -
2756 - return ret;
2757 -}
2758 -
2759 -unsigned long randomize_et_dyn(unsigned long base)
2760 -{
2761 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2762 -
2763 - if (ret < base)
2764 - return base;
2765 -
2766 - return ret;
2767 -}
2768 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2769 index 836a5a1..27289a3 100644
2770 --- a/arch/powerpc/kernel/signal_32.c
2771 +++ b/arch/powerpc/kernel/signal_32.c
2772 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2773 /* Save user registers on the stack */
2774 frame = &rt_sf->uc.uc_mcontext;
2775 addr = frame;
2776 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2777 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2778 if (save_user_regs(regs, frame, 0, 1))
2779 goto badframe;
2780 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2781 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2782 index a50b5ec..547078a 100644
2783 --- a/arch/powerpc/kernel/signal_64.c
2784 +++ b/arch/powerpc/kernel/signal_64.c
2785 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2786 current->thread.fpscr.val = 0;
2787
2788 /* Set up to return from userspace. */
2789 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2790 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2791 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2792 } else {
2793 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2794 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2795 index 5459d14..10f8070 100644
2796 --- a/arch/powerpc/kernel/traps.c
2797 +++ b/arch/powerpc/kernel/traps.c
2798 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2799 static inline void pmac_backlight_unblank(void) { }
2800 #endif
2801
2802 +extern void gr_handle_kernel_exploit(void);
2803 +
2804 int die(const char *str, struct pt_regs *regs, long err)
2805 {
2806 static struct {
2807 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2808 if (panic_on_oops)
2809 panic("Fatal exception");
2810
2811 + gr_handle_kernel_exploit();
2812 +
2813 oops_exit();
2814 do_exit(err);
2815
2816 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2817 index 7d14bb6..1305601 100644
2818 --- a/arch/powerpc/kernel/vdso.c
2819 +++ b/arch/powerpc/kernel/vdso.c
2820 @@ -35,6 +35,7 @@
2821 #include <asm/firmware.h>
2822 #include <asm/vdso.h>
2823 #include <asm/vdso_datapage.h>
2824 +#include <asm/mman.h>
2825
2826 #include "setup.h"
2827
2828 @@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2829 vdso_base = VDSO32_MBASE;
2830 #endif
2831
2832 - current->mm->context.vdso_base = 0;
2833 + current->mm->context.vdso_base = ~0UL;
2834
2835 /* vDSO has a problem and was disabled, just don't "enable" it for the
2836 * process
2837 @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2838 vdso_base = get_unmapped_area(NULL, vdso_base,
2839 (vdso_pages << PAGE_SHIFT) +
2840 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2841 - 0, 0);
2842 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2843 if (IS_ERR_VALUE(vdso_base)) {
2844 rc = vdso_base;
2845 goto fail_mmapsem;
2846 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2847 index 5eea6f3..5d10396 100644
2848 --- a/arch/powerpc/lib/usercopy_64.c
2849 +++ b/arch/powerpc/lib/usercopy_64.c
2850 @@ -9,22 +9,6 @@
2851 #include <linux/module.h>
2852 #include <asm/uaccess.h>
2853
2854 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2855 -{
2856 - if (likely(access_ok(VERIFY_READ, from, n)))
2857 - n = __copy_from_user(to, from, n);
2858 - else
2859 - memset(to, 0, n);
2860 - return n;
2861 -}
2862 -
2863 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2864 -{
2865 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2866 - n = __copy_to_user(to, from, n);
2867 - return n;
2868 -}
2869 -
2870 unsigned long copy_in_user(void __user *to, const void __user *from,
2871 unsigned long n)
2872 {
2873 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2874 return n;
2875 }
2876
2877 -EXPORT_SYMBOL(copy_from_user);
2878 -EXPORT_SYMBOL(copy_to_user);
2879 EXPORT_SYMBOL(copy_in_user);
2880
2881 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2882 index 5efe8c9..db9ceef 100644
2883 --- a/arch/powerpc/mm/fault.c
2884 +++ b/arch/powerpc/mm/fault.c
2885 @@ -32,6 +32,10 @@
2886 #include <linux/perf_event.h>
2887 #include <linux/magic.h>
2888 #include <linux/ratelimit.h>
2889 +#include <linux/slab.h>
2890 +#include <linux/pagemap.h>
2891 +#include <linux/compiler.h>
2892 +#include <linux/unistd.h>
2893
2894 #include <asm/firmware.h>
2895 #include <asm/page.h>
2896 @@ -43,6 +47,7 @@
2897 #include <asm/tlbflush.h>
2898 #include <asm/siginfo.h>
2899 #include <mm/mmu_decl.h>
2900 +#include <asm/ptrace.h>
2901
2902 #ifdef CONFIG_KPROBES
2903 static inline int notify_page_fault(struct pt_regs *regs)
2904 @@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2905 }
2906 #endif
2907
2908 +#ifdef CONFIG_PAX_PAGEEXEC
2909 +/*
2910 + * PaX: decide what to do with offenders (regs->nip = fault address)
2911 + *
2912 + * returns 1 when task should be killed
2913 + */
2914 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2915 +{
2916 + return 1;
2917 +}
2918 +
2919 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2920 +{
2921 + unsigned long i;
2922 +
2923 + printk(KERN_ERR "PAX: bytes at PC: ");
2924 + for (i = 0; i < 5; i++) {
2925 + unsigned int c;
2926 + if (get_user(c, (unsigned int __user *)pc+i))
2927 + printk(KERN_CONT "???????? ");
2928 + else
2929 + printk(KERN_CONT "%08x ", c);
2930 + }
2931 + printk("\n");
2932 +}
2933 +#endif
2934 +
2935 /*
2936 * Check whether the instruction at regs->nip is a store using
2937 * an update addressing form which will update r1.
2938 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2939 * indicate errors in DSISR but can validly be set in SRR1.
2940 */
2941 if (trap == 0x400)
2942 - error_code &= 0x48200000;
2943 + error_code &= 0x58200000;
2944 else
2945 is_write = error_code & DSISR_ISSTORE;
2946 #else
2947 @@ -259,7 +291,7 @@ good_area:
2948 * "undefined". Of those that can be set, this is the only
2949 * one which seems bad.
2950 */
2951 - if (error_code & 0x10000000)
2952 + if (error_code & DSISR_GUARDED)
2953 /* Guarded storage error. */
2954 goto bad_area;
2955 #endif /* CONFIG_8xx */
2956 @@ -274,7 +306,7 @@ good_area:
2957 * processors use the same I/D cache coherency mechanism
2958 * as embedded.
2959 */
2960 - if (error_code & DSISR_PROTFAULT)
2961 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2962 goto bad_area;
2963 #endif /* CONFIG_PPC_STD_MMU */
2964
2965 @@ -343,6 +375,23 @@ bad_area:
2966 bad_area_nosemaphore:
2967 /* User mode accesses cause a SIGSEGV */
2968 if (user_mode(regs)) {
2969 +
2970 +#ifdef CONFIG_PAX_PAGEEXEC
2971 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2972 +#ifdef CONFIG_PPC_STD_MMU
2973 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2974 +#else
2975 + if (is_exec && regs->nip == address) {
2976 +#endif
2977 + switch (pax_handle_fetch_fault(regs)) {
2978 + }
2979 +
2980 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2981 + do_group_exit(SIGKILL);
2982 + }
2983 + }
2984 +#endif
2985 +
2986 _exception(SIGSEGV, regs, code, address);
2987 return 0;
2988 }
2989 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2990 index 5a783d8..c23e14b 100644
2991 --- a/arch/powerpc/mm/mmap_64.c
2992 +++ b/arch/powerpc/mm/mmap_64.c
2993 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2994 */
2995 if (mmap_is_legacy()) {
2996 mm->mmap_base = TASK_UNMAPPED_BASE;
2997 +
2998 +#ifdef CONFIG_PAX_RANDMMAP
2999 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3000 + mm->mmap_base += mm->delta_mmap;
3001 +#endif
3002 +
3003 mm->get_unmapped_area = arch_get_unmapped_area;
3004 mm->unmap_area = arch_unmap_area;
3005 } else {
3006 mm->mmap_base = mmap_base();
3007 +
3008 +#ifdef CONFIG_PAX_RANDMMAP
3009 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3010 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3011 +#endif
3012 +
3013 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3014 mm->unmap_area = arch_unmap_area_topdown;
3015 }
3016 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3017 index 73709f7..6b90313 100644
3018 --- a/arch/powerpc/mm/slice.c
3019 +++ b/arch/powerpc/mm/slice.c
3020 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3021 if ((mm->task_size - len) < addr)
3022 return 0;
3023 vma = find_vma(mm, addr);
3024 - return (!vma || (addr + len) <= vma->vm_start);
3025 + return check_heap_stack_gap(vma, addr, len);
3026 }
3027
3028 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3029 @@ -256,7 +256,7 @@ full_search:
3030 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3031 continue;
3032 }
3033 - if (!vma || addr + len <= vma->vm_start) {
3034 + if (check_heap_stack_gap(vma, addr, len)) {
3035 /*
3036 * Remember the place where we stopped the search:
3037 */
3038 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3039 }
3040 }
3041
3042 - addr = mm->mmap_base;
3043 - while (addr > len) {
3044 + if (mm->mmap_base < len)
3045 + addr = -ENOMEM;
3046 + else
3047 + addr = mm->mmap_base - len;
3048 +
3049 + while (!IS_ERR_VALUE(addr)) {
3050 /* Go down by chunk size */
3051 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3052 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3053
3054 /* Check for hit with different page size */
3055 mask = slice_range_to_mask(addr, len);
3056 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3057 * return with success:
3058 */
3059 vma = find_vma(mm, addr);
3060 - if (!vma || (addr + len) <= vma->vm_start) {
3061 + if (check_heap_stack_gap(vma, addr, len)) {
3062 /* remember the address as a hint for next time */
3063 if (use_cache)
3064 mm->free_area_cache = addr;
3065 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3066 mm->cached_hole_size = vma->vm_start - addr;
3067
3068 /* try just below the current vma->vm_start */
3069 - addr = vma->vm_start;
3070 + addr = skip_heap_stack_gap(vma, len);
3071 }
3072
3073 /*
3074 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3075 if (fixed && addr > (mm->task_size - len))
3076 return -EINVAL;
3077
3078 +#ifdef CONFIG_PAX_RANDMMAP
3079 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3080 + addr = 0;
3081 +#endif
3082 +
3083 /* If hint, make sure it matches our alignment restrictions */
3084 if (!fixed && addr) {
3085 addr = _ALIGN_UP(addr, 1ul << pshift);
3086 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
3087 index 8517d2a..d2738d4 100644
3088 --- a/arch/s390/include/asm/atomic.h
3089 +++ b/arch/s390/include/asm/atomic.h
3090 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
3091 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
3092 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3093
3094 +#define atomic64_read_unchecked(v) atomic64_read(v)
3095 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3096 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3097 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3098 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3099 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3100 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3101 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3102 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3103 +
3104 #define smp_mb__before_atomic_dec() smp_mb()
3105 #define smp_mb__after_atomic_dec() smp_mb()
3106 #define smp_mb__before_atomic_inc() smp_mb()
3107 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
3108 index 547f1a6..0b22b53 100644
3109 --- a/arch/s390/include/asm/elf.h
3110 +++ b/arch/s390/include/asm/elf.h
3111 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
3112 the loader. We need to make sure that it is out of the way of the program
3113 that it will "exec", and that there is sufficient room for the brk. */
3114
3115 -extern unsigned long randomize_et_dyn(unsigned long base);
3116 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
3117 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3118 +
3119 +#ifdef CONFIG_PAX_ASLR
3120 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3121 +
3122 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
3123 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
3124 +#endif
3125
3126 /* This yields a mask that user programs can use to figure out what
3127 instruction set this CPU supports. */
3128 @@ -211,7 +217,4 @@ struct linux_binprm;
3129 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
3130 int arch_setup_additional_pages(struct linux_binprm *, int);
3131
3132 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3133 -#define arch_randomize_brk arch_randomize_brk
3134 -
3135 #endif
3136 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
3137 index ef573c1..75a1ce6 100644
3138 --- a/arch/s390/include/asm/system.h
3139 +++ b/arch/s390/include/asm/system.h
3140 @@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
3141 extern void (*_machine_halt)(void);
3142 extern void (*_machine_power_off)(void);
3143
3144 -extern unsigned long arch_align_stack(unsigned long sp);
3145 +#define arch_align_stack(x) ((x) & ~0xfUL)
3146
3147 static inline int tprot(unsigned long addr)
3148 {
3149 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
3150 index 2b23885..e136e31 100644
3151 --- a/arch/s390/include/asm/uaccess.h
3152 +++ b/arch/s390/include/asm/uaccess.h
3153 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
3154 copy_to_user(void __user *to, const void *from, unsigned long n)
3155 {
3156 might_fault();
3157 +
3158 + if ((long)n < 0)
3159 + return n;
3160 +
3161 if (access_ok(VERIFY_WRITE, to, n))
3162 n = __copy_to_user(to, from, n);
3163 return n;
3164 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
3165 static inline unsigned long __must_check
3166 __copy_from_user(void *to, const void __user *from, unsigned long n)
3167 {
3168 + if ((long)n < 0)
3169 + return n;
3170 +
3171 if (__builtin_constant_p(n) && (n <= 256))
3172 return uaccess.copy_from_user_small(n, from, to);
3173 else
3174 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
3175 unsigned int sz = __compiletime_object_size(to);
3176
3177 might_fault();
3178 +
3179 + if ((long)n < 0)
3180 + return n;
3181 +
3182 if (unlikely(sz != -1 && sz < n)) {
3183 copy_from_user_overflow();
3184 return n;
3185 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
3186 index dfcb343..eda788a 100644
3187 --- a/arch/s390/kernel/module.c
3188 +++ b/arch/s390/kernel/module.c
3189 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
3190
3191 /* Increase core size by size of got & plt and set start
3192 offsets for got and plt. */
3193 - me->core_size = ALIGN(me->core_size, 4);
3194 - me->arch.got_offset = me->core_size;
3195 - me->core_size += me->arch.got_size;
3196 - me->arch.plt_offset = me->core_size;
3197 - me->core_size += me->arch.plt_size;
3198 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3199 + me->arch.got_offset = me->core_size_rw;
3200 + me->core_size_rw += me->arch.got_size;
3201 + me->arch.plt_offset = me->core_size_rx;
3202 + me->core_size_rx += me->arch.plt_size;
3203 return 0;
3204 }
3205
3206 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3207 if (info->got_initialized == 0) {
3208 Elf_Addr *gotent;
3209
3210 - gotent = me->module_core + me->arch.got_offset +
3211 + gotent = me->module_core_rw + me->arch.got_offset +
3212 info->got_offset;
3213 *gotent = val;
3214 info->got_initialized = 1;
3215 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3216 else if (r_type == R_390_GOTENT ||
3217 r_type == R_390_GOTPLTENT)
3218 *(unsigned int *) loc =
3219 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3220 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3221 else if (r_type == R_390_GOT64 ||
3222 r_type == R_390_GOTPLT64)
3223 *(unsigned long *) loc = val;
3224 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3225 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3226 if (info->plt_initialized == 0) {
3227 unsigned int *ip;
3228 - ip = me->module_core + me->arch.plt_offset +
3229 + ip = me->module_core_rx + me->arch.plt_offset +
3230 info->plt_offset;
3231 #ifndef CONFIG_64BIT
3232 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3233 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3234 val - loc + 0xffffUL < 0x1ffffeUL) ||
3235 (r_type == R_390_PLT32DBL &&
3236 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3237 - val = (Elf_Addr) me->module_core +
3238 + val = (Elf_Addr) me->module_core_rx +
3239 me->arch.plt_offset +
3240 info->plt_offset;
3241 val += rela->r_addend - loc;
3242 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3243 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3244 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3245 val = val + rela->r_addend -
3246 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3247 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3248 if (r_type == R_390_GOTOFF16)
3249 *(unsigned short *) loc = val;
3250 else if (r_type == R_390_GOTOFF32)
3251 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3252 break;
3253 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3254 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3255 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3256 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3257 rela->r_addend - loc;
3258 if (r_type == R_390_GOTPC)
3259 *(unsigned int *) loc = val;
3260 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3261 index 9451b21..ed8956f 100644
3262 --- a/arch/s390/kernel/process.c
3263 +++ b/arch/s390/kernel/process.c
3264 @@ -321,39 +321,3 @@ unsigned long get_wchan(struct task_struct *p)
3265 }
3266 return 0;
3267 }
3268 -
3269 -unsigned long arch_align_stack(unsigned long sp)
3270 -{
3271 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3272 - sp -= get_random_int() & ~PAGE_MASK;
3273 - return sp & ~0xf;
3274 -}
3275 -
3276 -static inline unsigned long brk_rnd(void)
3277 -{
3278 - /* 8MB for 32bit, 1GB for 64bit */
3279 - if (is_32bit_task())
3280 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3281 - else
3282 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3283 -}
3284 -
3285 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3286 -{
3287 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3288 -
3289 - if (ret < mm->brk)
3290 - return mm->brk;
3291 - return ret;
3292 -}
3293 -
3294 -unsigned long randomize_et_dyn(unsigned long base)
3295 -{
3296 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3297 -
3298 - if (!(current->flags & PF_RANDOMIZE))
3299 - return base;
3300 - if (ret < base)
3301 - return base;
3302 - return ret;
3303 -}
3304 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3305 index f09c748..cf9ec1d 100644
3306 --- a/arch/s390/mm/mmap.c
3307 +++ b/arch/s390/mm/mmap.c
3308 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3309 */
3310 if (mmap_is_legacy()) {
3311 mm->mmap_base = TASK_UNMAPPED_BASE;
3312 +
3313 +#ifdef CONFIG_PAX_RANDMMAP
3314 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3315 + mm->mmap_base += mm->delta_mmap;
3316 +#endif
3317 +
3318 mm->get_unmapped_area = arch_get_unmapped_area;
3319 mm->unmap_area = arch_unmap_area;
3320 } else {
3321 mm->mmap_base = mmap_base();
3322 +
3323 +#ifdef CONFIG_PAX_RANDMMAP
3324 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3325 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3326 +#endif
3327 +
3328 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3329 mm->unmap_area = arch_unmap_area_topdown;
3330 }
3331 @@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3332 */
3333 if (mmap_is_legacy()) {
3334 mm->mmap_base = TASK_UNMAPPED_BASE;
3335 +
3336 +#ifdef CONFIG_PAX_RANDMMAP
3337 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3338 + mm->mmap_base += mm->delta_mmap;
3339 +#endif
3340 +
3341 mm->get_unmapped_area = s390_get_unmapped_area;
3342 mm->unmap_area = arch_unmap_area;
3343 } else {
3344 mm->mmap_base = mmap_base();
3345 +
3346 +#ifdef CONFIG_PAX_RANDMMAP
3347 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3348 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3349 +#endif
3350 +
3351 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3352 mm->unmap_area = arch_unmap_area_topdown;
3353 }
3354 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3355 index 589d5c7..669e274 100644
3356 --- a/arch/score/include/asm/system.h
3357 +++ b/arch/score/include/asm/system.h
3358 @@ -17,7 +17,7 @@ do { \
3359 #define finish_arch_switch(prev) do {} while (0)
3360
3361 typedef void (*vi_handler_t)(void);
3362 -extern unsigned long arch_align_stack(unsigned long sp);
3363 +#define arch_align_stack(x) (x)
3364
3365 #define mb() barrier()
3366 #define rmb() barrier()
3367 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3368 index 25d0803..d6c8e36 100644
3369 --- a/arch/score/kernel/process.c
3370 +++ b/arch/score/kernel/process.c
3371 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3372
3373 return task_pt_regs(task)->cp0_epc;
3374 }
3375 -
3376 -unsigned long arch_align_stack(unsigned long sp)
3377 -{
3378 - return sp;
3379 -}
3380 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3381 index afeb710..d1d1289 100644
3382 --- a/arch/sh/mm/mmap.c
3383 +++ b/arch/sh/mm/mmap.c
3384 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3385 addr = PAGE_ALIGN(addr);
3386
3387 vma = find_vma(mm, addr);
3388 - if (TASK_SIZE - len >= addr &&
3389 - (!vma || addr + len <= vma->vm_start))
3390 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3391 return addr;
3392 }
3393
3394 @@ -106,7 +105,7 @@ full_search:
3395 }
3396 return -ENOMEM;
3397 }
3398 - if (likely(!vma || addr + len <= vma->vm_start)) {
3399 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3400 /*
3401 * Remember the place where we stopped the search:
3402 */
3403 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3404 addr = PAGE_ALIGN(addr);
3405
3406 vma = find_vma(mm, addr);
3407 - if (TASK_SIZE - len >= addr &&
3408 - (!vma || addr + len <= vma->vm_start))
3409 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3410 return addr;
3411 }
3412
3413 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3414 /* make sure it can fit in the remaining address space */
3415 if (likely(addr > len)) {
3416 vma = find_vma(mm, addr-len);
3417 - if (!vma || addr <= vma->vm_start) {
3418 + if (check_heap_stack_gap(vma, addr - len, len)) {
3419 /* remember the address as a hint for next time */
3420 return (mm->free_area_cache = addr-len);
3421 }
3422 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3423 if (unlikely(mm->mmap_base < len))
3424 goto bottomup;
3425
3426 - addr = mm->mmap_base-len;
3427 - if (do_colour_align)
3428 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3429 + addr = mm->mmap_base - len;
3430
3431 do {
3432 + if (do_colour_align)
3433 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3434 /*
3435 * Lookup failure means no vma is above this address,
3436 * else if new region fits below vma->vm_start,
3437 * return with success:
3438 */
3439 vma = find_vma(mm, addr);
3440 - if (likely(!vma || addr+len <= vma->vm_start)) {
3441 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3442 /* remember the address as a hint for next time */
3443 return (mm->free_area_cache = addr);
3444 }
3445 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3446 mm->cached_hole_size = vma->vm_start - addr;
3447
3448 /* try just below the current vma->vm_start */
3449 - addr = vma->vm_start-len;
3450 - if (do_colour_align)
3451 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3452 - } while (likely(len < vma->vm_start));
3453 + addr = skip_heap_stack_gap(vma, len);
3454 + } while (!IS_ERR_VALUE(addr));
3455
3456 bottomup:
3457 /*
3458 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
3459 index f92602e..27060b2 100644
3460 --- a/arch/sparc/Kconfig
3461 +++ b/arch/sparc/Kconfig
3462 @@ -31,6 +31,7 @@ config SPARC
3463
3464 config SPARC32
3465 def_bool !64BIT
3466 + select GENERIC_ATOMIC64
3467
3468 config SPARC64
3469 def_bool 64BIT
3470 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3471 index ad1fb5d..fc5315b 100644
3472 --- a/arch/sparc/Makefile
3473 +++ b/arch/sparc/Makefile
3474 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3475 # Export what is needed by arch/sparc/boot/Makefile
3476 export VMLINUX_INIT VMLINUX_MAIN
3477 VMLINUX_INIT := $(head-y) $(init-y)
3478 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3479 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3480 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3481 VMLINUX_MAIN += $(drivers-y) $(net-y)
3482
3483 diff --git a/arch/sparc/include/asm/atomic.h b/arch/sparc/include/asm/atomic.h
3484 index 8ff83d8..4a459c2 100644
3485 --- a/arch/sparc/include/asm/atomic.h
3486 +++ b/arch/sparc/include/asm/atomic.h
3487 @@ -4,5 +4,6 @@
3488 #include <asm/atomic_64.h>
3489 #else
3490 #include <asm/atomic_32.h>
3491 +#include <asm-generic/atomic64.h>
3492 #endif
3493 #endif
3494 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3495 index 9f421df..b81fc12 100644
3496 --- a/arch/sparc/include/asm/atomic_64.h
3497 +++ b/arch/sparc/include/asm/atomic_64.h
3498 @@ -14,18 +14,40 @@
3499 #define ATOMIC64_INIT(i) { (i) }
3500
3501 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3502 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3503 +{
3504 + return v->counter;
3505 +}
3506 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3507 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3508 +{
3509 + return v->counter;
3510 +}
3511
3512 #define atomic_set(v, i) (((v)->counter) = i)
3513 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3514 +{
3515 + v->counter = i;
3516 +}
3517 #define atomic64_set(v, i) (((v)->counter) = i)
3518 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3519 +{
3520 + v->counter = i;
3521 +}
3522
3523 extern void atomic_add(int, atomic_t *);
3524 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3525 extern void atomic64_add(long, atomic64_t *);
3526 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3527 extern void atomic_sub(int, atomic_t *);
3528 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3529 extern void atomic64_sub(long, atomic64_t *);
3530 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3531
3532 extern int atomic_add_ret(int, atomic_t *);
3533 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3534 extern long atomic64_add_ret(long, atomic64_t *);
3535 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3536 extern int atomic_sub_ret(int, atomic_t *);
3537 extern long atomic64_sub_ret(long, atomic64_t *);
3538
3539 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3540 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3541
3542 #define atomic_inc_return(v) atomic_add_ret(1, v)
3543 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3544 +{
3545 + return atomic_add_ret_unchecked(1, v);
3546 +}
3547 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3548 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3549 +{
3550 + return atomic64_add_ret_unchecked(1, v);
3551 +}
3552
3553 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3554 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3555
3556 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3557 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3558 +{
3559 + return atomic_add_ret_unchecked(i, v);
3560 +}
3561 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3562 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3563 +{
3564 + return atomic64_add_ret_unchecked(i, v);
3565 +}
3566
3567 /*
3568 * atomic_inc_and_test - increment and test
3569 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3570 * other cases.
3571 */
3572 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3573 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3574 +{
3575 + return atomic_inc_return_unchecked(v) == 0;
3576 +}
3577 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3578
3579 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3580 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3581 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3582
3583 #define atomic_inc(v) atomic_add(1, v)
3584 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3585 +{
3586 + atomic_add_unchecked(1, v);
3587 +}
3588 #define atomic64_inc(v) atomic64_add(1, v)
3589 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3590 +{
3591 + atomic64_add_unchecked(1, v);
3592 +}
3593
3594 #define atomic_dec(v) atomic_sub(1, v)
3595 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3596 +{
3597 + atomic_sub_unchecked(1, v);
3598 +}
3599 #define atomic64_dec(v) atomic64_sub(1, v)
3600 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3601 +{
3602 + atomic64_sub_unchecked(1, v);
3603 +}
3604
3605 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3606 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3607
3608 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3609 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3610 +{
3611 + return cmpxchg(&v->counter, old, new);
3612 +}
3613 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3614 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3615 +{
3616 + return xchg(&v->counter, new);
3617 +}
3618
3619 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3620 {
3621 - int c, old;
3622 + int c, old, new;
3623 c = atomic_read(v);
3624 for (;;) {
3625 - if (unlikely(c == (u)))
3626 + if (unlikely(c == u))
3627 break;
3628 - old = atomic_cmpxchg((v), c, c + (a));
3629 +
3630 + asm volatile("addcc %2, %0, %0\n"
3631 +
3632 +#ifdef CONFIG_PAX_REFCOUNT
3633 + "tvs %%icc, 6\n"
3634 +#endif
3635 +
3636 + : "=r" (new)
3637 + : "0" (c), "ir" (a)
3638 + : "cc");
3639 +
3640 + old = atomic_cmpxchg(v, c, new);
3641 if (likely(old == c))
3642 break;
3643 c = old;
3644 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3645 #define atomic64_cmpxchg(v, o, n) \
3646 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3647 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3648 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3649 +{
3650 + return xchg(&v->counter, new);
3651 +}
3652
3653 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3654 {
3655 - long c, old;
3656 + long c, old, new;
3657 c = atomic64_read(v);
3658 for (;;) {
3659 - if (unlikely(c == (u)))
3660 + if (unlikely(c == u))
3661 break;
3662 - old = atomic64_cmpxchg((v), c, c + (a));
3663 +
3664 + asm volatile("addcc %2, %0, %0\n"
3665 +
3666 +#ifdef CONFIG_PAX_REFCOUNT
3667 + "tvs %%xcc, 6\n"
3668 +#endif
3669 +
3670 + : "=r" (new)
3671 + : "0" (c), "ir" (a)
3672 + : "cc");
3673 +
3674 + old = atomic64_cmpxchg(v, c, new);
3675 if (likely(old == c))
3676 break;
3677 c = old;
3678 }
3679 - return c != (u);
3680 + return c != u;
3681 }
3682
3683 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3684 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3685 index 69358b5..17b4745 100644
3686 --- a/arch/sparc/include/asm/cache.h
3687 +++ b/arch/sparc/include/asm/cache.h
3688 @@ -10,7 +10,7 @@
3689 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3690
3691 #define L1_CACHE_SHIFT 5
3692 -#define L1_CACHE_BYTES 32
3693 +#define L1_CACHE_BYTES 32UL
3694
3695 #ifdef CONFIG_SPARC32
3696 #define SMP_CACHE_BYTES_SHIFT 5
3697 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3698 index 4269ca6..e3da77f 100644
3699 --- a/arch/sparc/include/asm/elf_32.h
3700 +++ b/arch/sparc/include/asm/elf_32.h
3701 @@ -114,6 +114,13 @@ typedef struct {
3702
3703 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3704
3705 +#ifdef CONFIG_PAX_ASLR
3706 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3707 +
3708 +#define PAX_DELTA_MMAP_LEN 16
3709 +#define PAX_DELTA_STACK_LEN 16
3710 +#endif
3711 +
3712 /* This yields a mask that user programs can use to figure out what
3713 instruction set this cpu supports. This can NOT be done in userspace
3714 on Sparc. */
3715 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3716 index 7df8b7f..4946269 100644
3717 --- a/arch/sparc/include/asm/elf_64.h
3718 +++ b/arch/sparc/include/asm/elf_64.h
3719 @@ -180,6 +180,13 @@ typedef struct {
3720 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3721 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3722
3723 +#ifdef CONFIG_PAX_ASLR
3724 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3725 +
3726 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3727 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3728 +#endif
3729 +
3730 extern unsigned long sparc64_elf_hwcap;
3731 #define ELF_HWCAP sparc64_elf_hwcap
3732
3733 diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
3734 index 156707b..aefa786 100644
3735 --- a/arch/sparc/include/asm/page_32.h
3736 +++ b/arch/sparc/include/asm/page_32.h
3737 @@ -8,6 +8,8 @@
3738 #ifndef _SPARC_PAGE_H
3739 #define _SPARC_PAGE_H
3740
3741 +#include <linux/const.h>
3742 +
3743 #define PAGE_SHIFT 12
3744
3745 #ifndef __ASSEMBLY__
3746 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3747 index a790cc6..091ed94 100644
3748 --- a/arch/sparc/include/asm/pgtable_32.h
3749 +++ b/arch/sparc/include/asm/pgtable_32.h
3750 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3751 BTFIXUPDEF_INT(page_none)
3752 BTFIXUPDEF_INT(page_copy)
3753 BTFIXUPDEF_INT(page_readonly)
3754 +
3755 +#ifdef CONFIG_PAX_PAGEEXEC
3756 +BTFIXUPDEF_INT(page_shared_noexec)
3757 +BTFIXUPDEF_INT(page_copy_noexec)
3758 +BTFIXUPDEF_INT(page_readonly_noexec)
3759 +#endif
3760 +
3761 BTFIXUPDEF_INT(page_kernel)
3762
3763 #define PMD_SHIFT SUN4C_PMD_SHIFT
3764 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3765 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3766 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3767
3768 +#ifdef CONFIG_PAX_PAGEEXEC
3769 +extern pgprot_t PAGE_SHARED_NOEXEC;
3770 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3771 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3772 +#else
3773 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3774 +# define PAGE_COPY_NOEXEC PAGE_COPY
3775 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3776 +#endif
3777 +
3778 extern unsigned long page_kernel;
3779
3780 #ifdef MODULE
3781 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3782 index f6ae2b2..b03ffc7 100644
3783 --- a/arch/sparc/include/asm/pgtsrmmu.h
3784 +++ b/arch/sparc/include/asm/pgtsrmmu.h
3785 @@ -115,6 +115,13 @@
3786 SRMMU_EXEC | SRMMU_REF)
3787 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3788 SRMMU_EXEC | SRMMU_REF)
3789 +
3790 +#ifdef CONFIG_PAX_PAGEEXEC
3791 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3792 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3793 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3794 +#endif
3795 +
3796 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3797 SRMMU_DIRTY | SRMMU_REF)
3798
3799 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3800 index 9689176..63c18ea 100644
3801 --- a/arch/sparc/include/asm/spinlock_64.h
3802 +++ b/arch/sparc/include/asm/spinlock_64.h
3803 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3804
3805 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3806
3807 -static void inline arch_read_lock(arch_rwlock_t *lock)
3808 +static inline void arch_read_lock(arch_rwlock_t *lock)
3809 {
3810 unsigned long tmp1, tmp2;
3811
3812 __asm__ __volatile__ (
3813 "1: ldsw [%2], %0\n"
3814 " brlz,pn %0, 2f\n"
3815 -"4: add %0, 1, %1\n"
3816 +"4: addcc %0, 1, %1\n"
3817 +
3818 +#ifdef CONFIG_PAX_REFCOUNT
3819 +" tvs %%icc, 6\n"
3820 +#endif
3821 +
3822 " cas [%2], %0, %1\n"
3823 " cmp %0, %1\n"
3824 " bne,pn %%icc, 1b\n"
3825 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3826 " .previous"
3827 : "=&r" (tmp1), "=&r" (tmp2)
3828 : "r" (lock)
3829 - : "memory");
3830 + : "memory", "cc");
3831 }
3832
3833 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3834 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3835 {
3836 int tmp1, tmp2;
3837
3838 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3839 "1: ldsw [%2], %0\n"
3840 " brlz,a,pn %0, 2f\n"
3841 " mov 0, %0\n"
3842 -" add %0, 1, %1\n"
3843 +" addcc %0, 1, %1\n"
3844 +
3845 +#ifdef CONFIG_PAX_REFCOUNT
3846 +" tvs %%icc, 6\n"
3847 +#endif
3848 +
3849 " cas [%2], %0, %1\n"
3850 " cmp %0, %1\n"
3851 " bne,pn %%icc, 1b\n"
3852 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3853 return tmp1;
3854 }
3855
3856 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3857 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3858 {
3859 unsigned long tmp1, tmp2;
3860
3861 __asm__ __volatile__(
3862 "1: lduw [%2], %0\n"
3863 -" sub %0, 1, %1\n"
3864 +" subcc %0, 1, %1\n"
3865 +
3866 +#ifdef CONFIG_PAX_REFCOUNT
3867 +" tvs %%icc, 6\n"
3868 +#endif
3869 +
3870 " cas [%2], %0, %1\n"
3871 " cmp %0, %1\n"
3872 " bne,pn %%xcc, 1b\n"
3873 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3874 : "memory");
3875 }
3876
3877 -static void inline arch_write_lock(arch_rwlock_t *lock)
3878 +static inline void arch_write_lock(arch_rwlock_t *lock)
3879 {
3880 unsigned long mask, tmp1, tmp2;
3881
3882 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3883 : "memory");
3884 }
3885
3886 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3887 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3888 {
3889 __asm__ __volatile__(
3890 " stw %%g0, [%0]"
3891 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3892 : "memory");
3893 }
3894
3895 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3896 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3897 {
3898 unsigned long mask, tmp1, tmp2, result;
3899
3900 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3901 index fa57532..e1a4c53 100644
3902 --- a/arch/sparc/include/asm/thread_info_32.h
3903 +++ b/arch/sparc/include/asm/thread_info_32.h
3904 @@ -50,6 +50,8 @@ struct thread_info {
3905 unsigned long w_saved;
3906
3907 struct restart_block restart_block;
3908 +
3909 + unsigned long lowest_stack;
3910 };
3911
3912 /*
3913 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3914 index 60d86be..952dea1 100644
3915 --- a/arch/sparc/include/asm/thread_info_64.h
3916 +++ b/arch/sparc/include/asm/thread_info_64.h
3917 @@ -63,6 +63,8 @@ struct thread_info {
3918 struct pt_regs *kern_una_regs;
3919 unsigned int kern_una_insn;
3920
3921 + unsigned long lowest_stack;
3922 +
3923 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3924 };
3925
3926 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3927 index e88fbe5..96b0ce5 100644
3928 --- a/arch/sparc/include/asm/uaccess.h
3929 +++ b/arch/sparc/include/asm/uaccess.h
3930 @@ -1,5 +1,13 @@
3931 #ifndef ___ASM_SPARC_UACCESS_H
3932 #define ___ASM_SPARC_UACCESS_H
3933 +
3934 +#ifdef __KERNEL__
3935 +#ifndef __ASSEMBLY__
3936 +#include <linux/types.h>
3937 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3938 +#endif
3939 +#endif
3940 +
3941 #if defined(__sparc__) && defined(__arch64__)
3942 #include <asm/uaccess_64.h>
3943 #else
3944 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3945 index 8303ac4..07f333d 100644
3946 --- a/arch/sparc/include/asm/uaccess_32.h
3947 +++ b/arch/sparc/include/asm/uaccess_32.h
3948 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3949
3950 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3951 {
3952 - if (n && __access_ok((unsigned long) to, n))
3953 + if ((long)n < 0)
3954 + return n;
3955 +
3956 + if (n && __access_ok((unsigned long) to, n)) {
3957 + if (!__builtin_constant_p(n))
3958 + check_object_size(from, n, true);
3959 return __copy_user(to, (__force void __user *) from, n);
3960 - else
3961 + } else
3962 return n;
3963 }
3964
3965 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3966 {
3967 + if ((long)n < 0)
3968 + return n;
3969 +
3970 + if (!__builtin_constant_p(n))
3971 + check_object_size(from, n, true);
3972 +
3973 return __copy_user(to, (__force void __user *) from, n);
3974 }
3975
3976 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3977 {
3978 - if (n && __access_ok((unsigned long) from, n))
3979 + if ((long)n < 0)
3980 + return n;
3981 +
3982 + if (n && __access_ok((unsigned long) from, n)) {
3983 + if (!__builtin_constant_p(n))
3984 + check_object_size(to, n, false);
3985 return __copy_user((__force void __user *) to, from, n);
3986 - else
3987 + } else
3988 return n;
3989 }
3990
3991 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3992 {
3993 + if ((long)n < 0)
3994 + return n;
3995 +
3996 return __copy_user((__force void __user *) to, from, n);
3997 }
3998
3999 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
4000 index 3e1449f..5293a0e 100644
4001 --- a/arch/sparc/include/asm/uaccess_64.h
4002 +++ b/arch/sparc/include/asm/uaccess_64.h
4003 @@ -10,6 +10,7 @@
4004 #include <linux/compiler.h>
4005 #include <linux/string.h>
4006 #include <linux/thread_info.h>
4007 +#include <linux/kernel.h>
4008 #include <asm/asi.h>
4009 #include <asm/system.h>
4010 #include <asm/spitfire.h>
4011 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
4012 static inline unsigned long __must_check
4013 copy_from_user(void *to, const void __user *from, unsigned long size)
4014 {
4015 - unsigned long ret = ___copy_from_user(to, from, size);
4016 + unsigned long ret;
4017
4018 + if ((long)size < 0 || size > INT_MAX)
4019 + return size;
4020 +
4021 + if (!__builtin_constant_p(size))
4022 + check_object_size(to, size, false);
4023 +
4024 + ret = ___copy_from_user(to, from, size);
4025 if (unlikely(ret))
4026 ret = copy_from_user_fixup(to, from, size);
4027
4028 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
4029 static inline unsigned long __must_check
4030 copy_to_user(void __user *to, const void *from, unsigned long size)
4031 {
4032 - unsigned long ret = ___copy_to_user(to, from, size);
4033 + unsigned long ret;
4034
4035 + if ((long)size < 0 || size > INT_MAX)
4036 + return size;
4037 +
4038 + if (!__builtin_constant_p(size))
4039 + check_object_size(from, size, true);
4040 +
4041 + ret = ___copy_to_user(to, from, size);
4042 if (unlikely(ret))
4043 ret = copy_to_user_fixup(to, from, size);
4044 return ret;
4045 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
4046 index cb85458..e063f17 100644
4047 --- a/arch/sparc/kernel/Makefile
4048 +++ b/arch/sparc/kernel/Makefile
4049 @@ -3,7 +3,7 @@
4050 #
4051
4052 asflags-y := -ansi
4053 -ccflags-y := -Werror
4054 +#ccflags-y := -Werror
4055
4056 extra-y := head_$(BITS).o
4057 extra-y += init_task.o
4058 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
4059 index f793742..4d880af 100644
4060 --- a/arch/sparc/kernel/process_32.c
4061 +++ b/arch/sparc/kernel/process_32.c
4062 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
4063 rw->ins[4], rw->ins[5],
4064 rw->ins[6],
4065 rw->ins[7]);
4066 - printk("%pS\n", (void *) rw->ins[7]);
4067 + printk("%pA\n", (void *) rw->ins[7]);
4068 rw = (struct reg_window32 *) rw->ins[6];
4069 }
4070 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4071 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
4072
4073 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4074 r->psr, r->pc, r->npc, r->y, print_tainted());
4075 - printk("PC: <%pS>\n", (void *) r->pc);
4076 + printk("PC: <%pA>\n", (void *) r->pc);
4077 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4078 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4079 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4080 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4081 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4082 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4083 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4084 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4085
4086 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4087 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4088 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4089 rw = (struct reg_window32 *) fp;
4090 pc = rw->ins[7];
4091 printk("[%08lx : ", pc);
4092 - printk("%pS ] ", (void *) pc);
4093 + printk("%pA ] ", (void *) pc);
4094 fp = rw->ins[6];
4095 } while (++count < 16);
4096 printk("\n");
4097 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
4098 index 3739a06..48b2ff0 100644
4099 --- a/arch/sparc/kernel/process_64.c
4100 +++ b/arch/sparc/kernel/process_64.c
4101 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
4102 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4103 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4104 if (regs->tstate & TSTATE_PRIV)
4105 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4106 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4107 }
4108
4109 void show_regs(struct pt_regs *regs)
4110 {
4111 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4112 regs->tpc, regs->tnpc, regs->y, print_tainted());
4113 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4114 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4115 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4116 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4117 regs->u_regs[3]);
4118 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4119 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4120 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4121 regs->u_regs[15]);
4122 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4123 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4124 show_regwindow(regs);
4125 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
4126 }
4127 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
4128 ((tp && tp->task) ? tp->task->pid : -1));
4129
4130 if (gp->tstate & TSTATE_PRIV) {
4131 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4132 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4133 (void *) gp->tpc,
4134 (void *) gp->o7,
4135 (void *) gp->i7,
4136 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
4137 index 42b282f..28ce9f2 100644
4138 --- a/arch/sparc/kernel/sys_sparc_32.c
4139 +++ b/arch/sparc/kernel/sys_sparc_32.c
4140 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4141 if (ARCH_SUN4C && len > 0x20000000)
4142 return -ENOMEM;
4143 if (!addr)
4144 - addr = TASK_UNMAPPED_BASE;
4145 + addr = current->mm->mmap_base;
4146
4147 if (flags & MAP_SHARED)
4148 addr = COLOUR_ALIGN(addr);
4149 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4150 }
4151 if (TASK_SIZE - PAGE_SIZE - len < addr)
4152 return -ENOMEM;
4153 - if (!vmm || addr + len <= vmm->vm_start)
4154 + if (check_heap_stack_gap(vmm, addr, len))
4155 return addr;
4156 addr = vmm->vm_end;
4157 if (flags & MAP_SHARED)
4158 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
4159 index 441521a..b767073 100644
4160 --- a/arch/sparc/kernel/sys_sparc_64.c
4161 +++ b/arch/sparc/kernel/sys_sparc_64.c
4162 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4163 /* We do not accept a shared mapping if it would violate
4164 * cache aliasing constraints.
4165 */
4166 - if ((flags & MAP_SHARED) &&
4167 + if ((filp || (flags & MAP_SHARED)) &&
4168 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4169 return -EINVAL;
4170 return addr;
4171 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4172 if (filp || (flags & MAP_SHARED))
4173 do_color_align = 1;
4174
4175 +#ifdef CONFIG_PAX_RANDMMAP
4176 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4177 +#endif
4178 +
4179 if (addr) {
4180 if (do_color_align)
4181 addr = COLOUR_ALIGN(addr, pgoff);
4182 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4183 addr = PAGE_ALIGN(addr);
4184
4185 vma = find_vma(mm, addr);
4186 - if (task_size - len >= addr &&
4187 - (!vma || addr + len <= vma->vm_start))
4188 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4189 return addr;
4190 }
4191
4192 if (len > mm->cached_hole_size) {
4193 - start_addr = addr = mm->free_area_cache;
4194 + start_addr = addr = mm->free_area_cache;
4195 } else {
4196 - start_addr = addr = TASK_UNMAPPED_BASE;
4197 + start_addr = addr = mm->mmap_base;
4198 mm->cached_hole_size = 0;
4199 }
4200
4201 @@ -174,14 +177,14 @@ full_search:
4202 vma = find_vma(mm, VA_EXCLUDE_END);
4203 }
4204 if (unlikely(task_size < addr)) {
4205 - if (start_addr != TASK_UNMAPPED_BASE) {
4206 - start_addr = addr = TASK_UNMAPPED_BASE;
4207 + if (start_addr != mm->mmap_base) {
4208 + start_addr = addr = mm->mmap_base;
4209 mm->cached_hole_size = 0;
4210 goto full_search;
4211 }
4212 return -ENOMEM;
4213 }
4214 - if (likely(!vma || addr + len <= vma->vm_start)) {
4215 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4216 /*
4217 * Remember the place where we stopped the search:
4218 */
4219 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4220 /* We do not accept a shared mapping if it would violate
4221 * cache aliasing constraints.
4222 */
4223 - if ((flags & MAP_SHARED) &&
4224 + if ((filp || (flags & MAP_SHARED)) &&
4225 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4226 return -EINVAL;
4227 return addr;
4228 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4229 addr = PAGE_ALIGN(addr);
4230
4231 vma = find_vma(mm, addr);
4232 - if (task_size - len >= addr &&
4233 - (!vma || addr + len <= vma->vm_start))
4234 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4235 return addr;
4236 }
4237
4238 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4239 /* make sure it can fit in the remaining address space */
4240 if (likely(addr > len)) {
4241 vma = find_vma(mm, addr-len);
4242 - if (!vma || addr <= vma->vm_start) {
4243 + if (check_heap_stack_gap(vma, addr - len, len)) {
4244 /* remember the address as a hint for next time */
4245 return (mm->free_area_cache = addr-len);
4246 }
4247 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4248 if (unlikely(mm->mmap_base < len))
4249 goto bottomup;
4250
4251 - addr = mm->mmap_base-len;
4252 - if (do_color_align)
4253 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4254 + addr = mm->mmap_base - len;
4255
4256 do {
4257 + if (do_color_align)
4258 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4259 /*
4260 * Lookup failure means no vma is above this address,
4261 * else if new region fits below vma->vm_start,
4262 * return with success:
4263 */
4264 vma = find_vma(mm, addr);
4265 - if (likely(!vma || addr+len <= vma->vm_start)) {
4266 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4267 /* remember the address as a hint for next time */
4268 return (mm->free_area_cache = addr);
4269 }
4270 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4271 mm->cached_hole_size = vma->vm_start - addr;
4272
4273 /* try just below the current vma->vm_start */
4274 - addr = vma->vm_start-len;
4275 - if (do_color_align)
4276 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4277 - } while (likely(len < vma->vm_start));
4278 + addr = skip_heap_stack_gap(vma, len);
4279 + } while (!IS_ERR_VALUE(addr));
4280
4281 bottomup:
4282 /*
4283 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4284 gap == RLIM_INFINITY ||
4285 sysctl_legacy_va_layout) {
4286 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4287 +
4288 +#ifdef CONFIG_PAX_RANDMMAP
4289 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4290 + mm->mmap_base += mm->delta_mmap;
4291 +#endif
4292 +
4293 mm->get_unmapped_area = arch_get_unmapped_area;
4294 mm->unmap_area = arch_unmap_area;
4295 } else {
4296 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4297 gap = (task_size / 6 * 5);
4298
4299 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4300 +
4301 +#ifdef CONFIG_PAX_RANDMMAP
4302 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4303 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4304 +#endif
4305 +
4306 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4307 mm->unmap_area = arch_unmap_area_topdown;
4308 }
4309 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4310 index 591f20c..0f1b925 100644
4311 --- a/arch/sparc/kernel/traps_32.c
4312 +++ b/arch/sparc/kernel/traps_32.c
4313 @@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
4314 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4315 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4316
4317 +extern void gr_handle_kernel_exploit(void);
4318 +
4319 void die_if_kernel(char *str, struct pt_regs *regs)
4320 {
4321 static int die_counter;
4322 @@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4323 count++ < 30 &&
4324 (((unsigned long) rw) >= PAGE_OFFSET) &&
4325 !(((unsigned long) rw) & 0x7)) {
4326 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4327 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4328 (void *) rw->ins[7]);
4329 rw = (struct reg_window32 *)rw->ins[6];
4330 }
4331 }
4332 printk("Instruction DUMP:");
4333 instruction_dump ((unsigned long *) regs->pc);
4334 - if(regs->psr & PSR_PS)
4335 + if(regs->psr & PSR_PS) {
4336 + gr_handle_kernel_exploit();
4337 do_exit(SIGKILL);
4338 + }
4339 do_exit(SIGSEGV);
4340 }
4341
4342 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4343 index 0cbdaa4..438e4c9 100644
4344 --- a/arch/sparc/kernel/traps_64.c
4345 +++ b/arch/sparc/kernel/traps_64.c
4346 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4347 i + 1,
4348 p->trapstack[i].tstate, p->trapstack[i].tpc,
4349 p->trapstack[i].tnpc, p->trapstack[i].tt);
4350 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4351 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4352 }
4353 }
4354
4355 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4356
4357 lvl -= 0x100;
4358 if (regs->tstate & TSTATE_PRIV) {
4359 +
4360 +#ifdef CONFIG_PAX_REFCOUNT
4361 + if (lvl == 6)
4362 + pax_report_refcount_overflow(regs);
4363 +#endif
4364 +
4365 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4366 die_if_kernel(buffer, regs);
4367 }
4368 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4369 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4370 {
4371 char buffer[32];
4372 -
4373 +
4374 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4375 0, lvl, SIGTRAP) == NOTIFY_STOP)
4376 return;
4377
4378 +#ifdef CONFIG_PAX_REFCOUNT
4379 + if (lvl == 6)
4380 + pax_report_refcount_overflow(regs);
4381 +#endif
4382 +
4383 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4384
4385 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4386 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4387 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4388 printk("%s" "ERROR(%d): ",
4389 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4390 - printk("TPC<%pS>\n", (void *) regs->tpc);
4391 + printk("TPC<%pA>\n", (void *) regs->tpc);
4392 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4393 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4394 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4395 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4396 smp_processor_id(),
4397 (type & 0x1) ? 'I' : 'D',
4398 regs->tpc);
4399 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4400 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4401 panic("Irrecoverable Cheetah+ parity error.");
4402 }
4403
4404 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4405 smp_processor_id(),
4406 (type & 0x1) ? 'I' : 'D',
4407 regs->tpc);
4408 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4409 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4410 }
4411
4412 struct sun4v_error_entry {
4413 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4414
4415 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4416 regs->tpc, tl);
4417 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4418 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4419 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4420 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4421 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4422 (void *) regs->u_regs[UREG_I7]);
4423 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4424 "pte[%lx] error[%lx]\n",
4425 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4426
4427 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4428 regs->tpc, tl);
4429 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4430 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4431 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4432 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4433 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4434 (void *) regs->u_regs[UREG_I7]);
4435 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4436 "pte[%lx] error[%lx]\n",
4437 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4438 fp = (unsigned long)sf->fp + STACK_BIAS;
4439 }
4440
4441 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4442 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4443 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4444 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4445 int index = tsk->curr_ret_stack;
4446 if (tsk->ret_stack && index >= graph) {
4447 pc = tsk->ret_stack[index - graph].ret;
4448 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4449 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4450 graph++;
4451 }
4452 }
4453 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4454 return (struct reg_window *) (fp + STACK_BIAS);
4455 }
4456
4457 +extern void gr_handle_kernel_exploit(void);
4458 +
4459 void die_if_kernel(char *str, struct pt_regs *regs)
4460 {
4461 static int die_counter;
4462 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4463 while (rw &&
4464 count++ < 30 &&
4465 kstack_valid(tp, (unsigned long) rw)) {
4466 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4467 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4468 (void *) rw->ins[7]);
4469
4470 rw = kernel_stack_up(rw);
4471 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4472 }
4473 user_instruction_dump ((unsigned int __user *) regs->tpc);
4474 }
4475 - if (regs->tstate & TSTATE_PRIV)
4476 + if (regs->tstate & TSTATE_PRIV) {
4477 + gr_handle_kernel_exploit();
4478 do_exit(SIGKILL);
4479 + }
4480 do_exit(SIGSEGV);
4481 }
4482 EXPORT_SYMBOL(die_if_kernel);
4483 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4484 index 76e4ac1..78f8bb1 100644
4485 --- a/arch/sparc/kernel/unaligned_64.c
4486 +++ b/arch/sparc/kernel/unaligned_64.c
4487 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4488 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4489
4490 if (__ratelimit(&ratelimit)) {
4491 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4492 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4493 regs->tpc, (void *) regs->tpc);
4494 }
4495 }
4496 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4497 index a3fc437..fea9957 100644
4498 --- a/arch/sparc/lib/Makefile
4499 +++ b/arch/sparc/lib/Makefile
4500 @@ -2,7 +2,7 @@
4501 #
4502
4503 asflags-y := -ansi -DST_DIV0=0x02
4504 -ccflags-y := -Werror
4505 +#ccflags-y := -Werror
4506
4507 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4508 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4509 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4510 index 59186e0..f747d7a 100644
4511 --- a/arch/sparc/lib/atomic_64.S
4512 +++ b/arch/sparc/lib/atomic_64.S
4513 @@ -18,7 +18,12 @@
4514 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4515 BACKOFF_SETUP(%o2)
4516 1: lduw [%o1], %g1
4517 - add %g1, %o0, %g7
4518 + addcc %g1, %o0, %g7
4519 +
4520 +#ifdef CONFIG_PAX_REFCOUNT
4521 + tvs %icc, 6
4522 +#endif
4523 +
4524 cas [%o1], %g1, %g7
4525 cmp %g1, %g7
4526 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4527 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4528 2: BACKOFF_SPIN(%o2, %o3, 1b)
4529 .size atomic_add, .-atomic_add
4530
4531 + .globl atomic_add_unchecked
4532 + .type atomic_add_unchecked,#function
4533 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4534 + BACKOFF_SETUP(%o2)
4535 +1: lduw [%o1], %g1
4536 + add %g1, %o0, %g7
4537 + cas [%o1], %g1, %g7
4538 + cmp %g1, %g7
4539 + bne,pn %icc, 2f
4540 + nop
4541 + retl
4542 + nop
4543 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4544 + .size atomic_add_unchecked, .-atomic_add_unchecked
4545 +
4546 .globl atomic_sub
4547 .type atomic_sub,#function
4548 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4549 BACKOFF_SETUP(%o2)
4550 1: lduw [%o1], %g1
4551 - sub %g1, %o0, %g7
4552 + subcc %g1, %o0, %g7
4553 +
4554 +#ifdef CONFIG_PAX_REFCOUNT
4555 + tvs %icc, 6
4556 +#endif
4557 +
4558 cas [%o1], %g1, %g7
4559 cmp %g1, %g7
4560 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4561 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4562 2: BACKOFF_SPIN(%o2, %o3, 1b)
4563 .size atomic_sub, .-atomic_sub
4564
4565 + .globl atomic_sub_unchecked
4566 + .type atomic_sub_unchecked,#function
4567 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4568 + BACKOFF_SETUP(%o2)
4569 +1: lduw [%o1], %g1
4570 + sub %g1, %o0, %g7
4571 + cas [%o1], %g1, %g7
4572 + cmp %g1, %g7
4573 + bne,pn %icc, 2f
4574 + nop
4575 + retl
4576 + nop
4577 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4578 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4579 +
4580 .globl atomic_add_ret
4581 .type atomic_add_ret,#function
4582 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4583 BACKOFF_SETUP(%o2)
4584 1: lduw [%o1], %g1
4585 - add %g1, %o0, %g7
4586 + addcc %g1, %o0, %g7
4587 +
4588 +#ifdef CONFIG_PAX_REFCOUNT
4589 + tvs %icc, 6
4590 +#endif
4591 +
4592 cas [%o1], %g1, %g7
4593 cmp %g1, %g7
4594 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4595 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4596 2: BACKOFF_SPIN(%o2, %o3, 1b)
4597 .size atomic_add_ret, .-atomic_add_ret
4598
4599 + .globl atomic_add_ret_unchecked
4600 + .type atomic_add_ret_unchecked,#function
4601 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4602 + BACKOFF_SETUP(%o2)
4603 +1: lduw [%o1], %g1
4604 + addcc %g1, %o0, %g7
4605 + cas [%o1], %g1, %g7
4606 + cmp %g1, %g7
4607 + bne,pn %icc, 2f
4608 + add %g7, %o0, %g7
4609 + sra %g7, 0, %o0
4610 + retl
4611 + nop
4612 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4613 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4614 +
4615 .globl atomic_sub_ret
4616 .type atomic_sub_ret,#function
4617 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4618 BACKOFF_SETUP(%o2)
4619 1: lduw [%o1], %g1
4620 - sub %g1, %o0, %g7
4621 + subcc %g1, %o0, %g7
4622 +
4623 +#ifdef CONFIG_PAX_REFCOUNT
4624 + tvs %icc, 6
4625 +#endif
4626 +
4627 cas [%o1], %g1, %g7
4628 cmp %g1, %g7
4629 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4630 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4631 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4632 BACKOFF_SETUP(%o2)
4633 1: ldx [%o1], %g1
4634 - add %g1, %o0, %g7
4635 + addcc %g1, %o0, %g7
4636 +
4637 +#ifdef CONFIG_PAX_REFCOUNT
4638 + tvs %xcc, 6
4639 +#endif
4640 +
4641 casx [%o1], %g1, %g7
4642 cmp %g1, %g7
4643 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4644 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4645 2: BACKOFF_SPIN(%o2, %o3, 1b)
4646 .size atomic64_add, .-atomic64_add
4647
4648 + .globl atomic64_add_unchecked
4649 + .type atomic64_add_unchecked,#function
4650 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4651 + BACKOFF_SETUP(%o2)
4652 +1: ldx [%o1], %g1
4653 + addcc %g1, %o0, %g7
4654 + casx [%o1], %g1, %g7
4655 + cmp %g1, %g7
4656 + bne,pn %xcc, 2f
4657 + nop
4658 + retl
4659 + nop
4660 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4661 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4662 +
4663 .globl atomic64_sub
4664 .type atomic64_sub,#function
4665 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4666 BACKOFF_SETUP(%o2)
4667 1: ldx [%o1], %g1
4668 - sub %g1, %o0, %g7
4669 + subcc %g1, %o0, %g7
4670 +
4671 +#ifdef CONFIG_PAX_REFCOUNT
4672 + tvs %xcc, 6
4673 +#endif
4674 +
4675 casx [%o1], %g1, %g7
4676 cmp %g1, %g7
4677 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4678 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4679 2: BACKOFF_SPIN(%o2, %o3, 1b)
4680 .size atomic64_sub, .-atomic64_sub
4681
4682 + .globl atomic64_sub_unchecked
4683 + .type atomic64_sub_unchecked,#function
4684 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4685 + BACKOFF_SETUP(%o2)
4686 +1: ldx [%o1], %g1
4687 + subcc %g1, %o0, %g7
4688 + casx [%o1], %g1, %g7
4689 + cmp %g1, %g7
4690 + bne,pn %xcc, 2f
4691 + nop
4692 + retl
4693 + nop
4694 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4695 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4696 +
4697 .globl atomic64_add_ret
4698 .type atomic64_add_ret,#function
4699 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4700 BACKOFF_SETUP(%o2)
4701 1: ldx [%o1], %g1
4702 - add %g1, %o0, %g7
4703 + addcc %g1, %o0, %g7
4704 +
4705 +#ifdef CONFIG_PAX_REFCOUNT
4706 + tvs %xcc, 6
4707 +#endif
4708 +
4709 casx [%o1], %g1, %g7
4710 cmp %g1, %g7
4711 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4712 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4713 2: BACKOFF_SPIN(%o2, %o3, 1b)
4714 .size atomic64_add_ret, .-atomic64_add_ret
4715
4716 + .globl atomic64_add_ret_unchecked
4717 + .type atomic64_add_ret_unchecked,#function
4718 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4719 + BACKOFF_SETUP(%o2)
4720 +1: ldx [%o1], %g1
4721 + addcc %g1, %o0, %g7
4722 + casx [%o1], %g1, %g7
4723 + cmp %g1, %g7
4724 + bne,pn %xcc, 2f
4725 + add %g7, %o0, %g7
4726 + mov %g7, %o0
4727 + retl
4728 + nop
4729 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4730 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4731 +
4732 .globl atomic64_sub_ret
4733 .type atomic64_sub_ret,#function
4734 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4735 BACKOFF_SETUP(%o2)
4736 1: ldx [%o1], %g1
4737 - sub %g1, %o0, %g7
4738 + subcc %g1, %o0, %g7
4739 +
4740 +#ifdef CONFIG_PAX_REFCOUNT
4741 + tvs %xcc, 6
4742 +#endif
4743 +
4744 casx [%o1], %g1, %g7
4745 cmp %g1, %g7
4746 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4747 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4748 index 1b30bb3..b4a16c7 100644
4749 --- a/arch/sparc/lib/ksyms.c
4750 +++ b/arch/sparc/lib/ksyms.c
4751 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4752
4753 /* Atomic counter implementation. */
4754 EXPORT_SYMBOL(atomic_add);
4755 +EXPORT_SYMBOL(atomic_add_unchecked);
4756 EXPORT_SYMBOL(atomic_add_ret);
4757 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4758 EXPORT_SYMBOL(atomic_sub);
4759 +EXPORT_SYMBOL(atomic_sub_unchecked);
4760 EXPORT_SYMBOL(atomic_sub_ret);
4761 EXPORT_SYMBOL(atomic64_add);
4762 +EXPORT_SYMBOL(atomic64_add_unchecked);
4763 EXPORT_SYMBOL(atomic64_add_ret);
4764 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4765 EXPORT_SYMBOL(atomic64_sub);
4766 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4767 EXPORT_SYMBOL(atomic64_sub_ret);
4768
4769 /* Atomic bit operations. */
4770 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4771 index 301421c..e2535d1 100644
4772 --- a/arch/sparc/mm/Makefile
4773 +++ b/arch/sparc/mm/Makefile
4774 @@ -2,7 +2,7 @@
4775 #
4776
4777 asflags-y := -ansi
4778 -ccflags-y := -Werror
4779 +#ccflags-y := -Werror
4780
4781 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4782 obj-y += fault_$(BITS).o
4783 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4784 index 8023fd7..c8e89e9 100644
4785 --- a/arch/sparc/mm/fault_32.c
4786 +++ b/arch/sparc/mm/fault_32.c
4787 @@ -21,6 +21,9 @@
4788 #include <linux/perf_event.h>
4789 #include <linux/interrupt.h>
4790 #include <linux/kdebug.h>
4791 +#include <linux/slab.h>
4792 +#include <linux/pagemap.h>
4793 +#include <linux/compiler.h>
4794
4795 #include <asm/system.h>
4796 #include <asm/page.h>
4797 @@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4798 return safe_compute_effective_address(regs, insn);
4799 }
4800
4801 +#ifdef CONFIG_PAX_PAGEEXEC
4802 +#ifdef CONFIG_PAX_DLRESOLVE
4803 +static void pax_emuplt_close(struct vm_area_struct *vma)
4804 +{
4805 + vma->vm_mm->call_dl_resolve = 0UL;
4806 +}
4807 +
4808 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4809 +{
4810 + unsigned int *kaddr;
4811 +
4812 + vmf->page = alloc_page(GFP_HIGHUSER);
4813 + if (!vmf->page)
4814 + return VM_FAULT_OOM;
4815 +
4816 + kaddr = kmap(vmf->page);
4817 + memset(kaddr, 0, PAGE_SIZE);
4818 + kaddr[0] = 0x9DE3BFA8U; /* save */
4819 + flush_dcache_page(vmf->page);
4820 + kunmap(vmf->page);
4821 + return VM_FAULT_MAJOR;
4822 +}
4823 +
4824 +static const struct vm_operations_struct pax_vm_ops = {
4825 + .close = pax_emuplt_close,
4826 + .fault = pax_emuplt_fault
4827 +};
4828 +
4829 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4830 +{
4831 + int ret;
4832 +
4833 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4834 + vma->vm_mm = current->mm;
4835 + vma->vm_start = addr;
4836 + vma->vm_end = addr + PAGE_SIZE;
4837 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4838 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4839 + vma->vm_ops = &pax_vm_ops;
4840 +
4841 + ret = insert_vm_struct(current->mm, vma);
4842 + if (ret)
4843 + return ret;
4844 +
4845 + ++current->mm->total_vm;
4846 + return 0;
4847 +}
4848 +#endif
4849 +
4850 +/*
4851 + * PaX: decide what to do with offenders (regs->pc = fault address)
4852 + *
4853 + * returns 1 when task should be killed
4854 + * 2 when patched PLT trampoline was detected
4855 + * 3 when unpatched PLT trampoline was detected
4856 + */
4857 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4858 +{
4859 +
4860 +#ifdef CONFIG_PAX_EMUPLT
4861 + int err;
4862 +
4863 + do { /* PaX: patched PLT emulation #1 */
4864 + unsigned int sethi1, sethi2, jmpl;
4865 +
4866 + err = get_user(sethi1, (unsigned int *)regs->pc);
4867 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4868 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4869 +
4870 + if (err)
4871 + break;
4872 +
4873 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4874 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4875 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4876 + {
4877 + unsigned int addr;
4878 +
4879 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4880 + addr = regs->u_regs[UREG_G1];
4881 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4882 + regs->pc = addr;
4883 + regs->npc = addr+4;
4884 + return 2;
4885 + }
4886 + } while (0);
4887 +
4888 + { /* PaX: patched PLT emulation #2 */
4889 + unsigned int ba;
4890 +
4891 + err = get_user(ba, (unsigned int *)regs->pc);
4892 +
4893 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4894 + unsigned int addr;
4895 +
4896 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4897 + regs->pc = addr;
4898 + regs->npc = addr+4;
4899 + return 2;
4900 + }
4901 + }
4902 +
4903 + do { /* PaX: patched PLT emulation #3 */
4904 + unsigned int sethi, jmpl, nop;
4905 +
4906 + err = get_user(sethi, (unsigned int *)regs->pc);
4907 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4908 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4909 +
4910 + if (err)
4911 + break;
4912 +
4913 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4914 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4915 + nop == 0x01000000U)
4916 + {
4917 + unsigned int addr;
4918 +
4919 + addr = (sethi & 0x003FFFFFU) << 10;
4920 + regs->u_regs[UREG_G1] = addr;
4921 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4922 + regs->pc = addr;
4923 + regs->npc = addr+4;
4924 + return 2;
4925 + }
4926 + } while (0);
4927 +
4928 + do { /* PaX: unpatched PLT emulation step 1 */
4929 + unsigned int sethi, ba, nop;
4930 +
4931 + err = get_user(sethi, (unsigned int *)regs->pc);
4932 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4933 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4934 +
4935 + if (err)
4936 + break;
4937 +
4938 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4939 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4940 + nop == 0x01000000U)
4941 + {
4942 + unsigned int addr, save, call;
4943 +
4944 + if ((ba & 0xFFC00000U) == 0x30800000U)
4945 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4946 + else
4947 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4948 +
4949 + err = get_user(save, (unsigned int *)addr);
4950 + err |= get_user(call, (unsigned int *)(addr+4));
4951 + err |= get_user(nop, (unsigned int *)(addr+8));
4952 + if (err)
4953 + break;
4954 +
4955 +#ifdef CONFIG_PAX_DLRESOLVE
4956 + if (save == 0x9DE3BFA8U &&
4957 + (call & 0xC0000000U) == 0x40000000U &&
4958 + nop == 0x01000000U)
4959 + {
4960 + struct vm_area_struct *vma;
4961 + unsigned long call_dl_resolve;
4962 +
4963 + down_read(&current->mm->mmap_sem);
4964 + call_dl_resolve = current->mm->call_dl_resolve;
4965 + up_read(&current->mm->mmap_sem);
4966 + if (likely(call_dl_resolve))
4967 + goto emulate;
4968 +
4969 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4970 +
4971 + down_write(&current->mm->mmap_sem);
4972 + if (current->mm->call_dl_resolve) {
4973 + call_dl_resolve = current->mm->call_dl_resolve;
4974 + up_write(&current->mm->mmap_sem);
4975 + if (vma)
4976 + kmem_cache_free(vm_area_cachep, vma);
4977 + goto emulate;
4978 + }
4979 +
4980 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4981 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4982 + up_write(&current->mm->mmap_sem);
4983 + if (vma)
4984 + kmem_cache_free(vm_area_cachep, vma);
4985 + return 1;
4986 + }
4987 +
4988 + if (pax_insert_vma(vma, call_dl_resolve)) {
4989 + up_write(&current->mm->mmap_sem);
4990 + kmem_cache_free(vm_area_cachep, vma);
4991 + return 1;
4992 + }
4993 +
4994 + current->mm->call_dl_resolve = call_dl_resolve;
4995 + up_write(&current->mm->mmap_sem);
4996 +
4997 +emulate:
4998 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4999 + regs->pc = call_dl_resolve;
5000 + regs->npc = addr+4;
5001 + return 3;
5002 + }
5003 +#endif
5004 +
5005 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5006 + if ((save & 0xFFC00000U) == 0x05000000U &&
5007 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5008 + nop == 0x01000000U)
5009 + {
5010 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5011 + regs->u_regs[UREG_G2] = addr + 4;
5012 + addr = (save & 0x003FFFFFU) << 10;
5013 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5014 + regs->pc = addr;
5015 + regs->npc = addr+4;
5016 + return 3;
5017 + }
5018 + }
5019 + } while (0);
5020 +
5021 + do { /* PaX: unpatched PLT emulation step 2 */
5022 + unsigned int save, call, nop;
5023 +
5024 + err = get_user(save, (unsigned int *)(regs->pc-4));
5025 + err |= get_user(call, (unsigned int *)regs->pc);
5026 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5027 + if (err)
5028 + break;
5029 +
5030 + if (save == 0x9DE3BFA8U &&
5031 + (call & 0xC0000000U) == 0x40000000U &&
5032 + nop == 0x01000000U)
5033 + {
5034 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5035 +
5036 + regs->u_regs[UREG_RETPC] = regs->pc;
5037 + regs->pc = dl_resolve;
5038 + regs->npc = dl_resolve+4;
5039 + return 3;
5040 + }
5041 + } while (0);
5042 +#endif
5043 +
5044 + return 1;
5045 +}
5046 +
5047 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5048 +{
5049 + unsigned long i;
5050 +
5051 + printk(KERN_ERR "PAX: bytes at PC: ");
5052 + for (i = 0; i < 8; i++) {
5053 + unsigned int c;
5054 + if (get_user(c, (unsigned int *)pc+i))
5055 + printk(KERN_CONT "???????? ");
5056 + else
5057 + printk(KERN_CONT "%08x ", c);
5058 + }
5059 + printk("\n");
5060 +}
5061 +#endif
5062 +
5063 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
5064 int text_fault)
5065 {
5066 @@ -280,6 +545,24 @@ good_area:
5067 if(!(vma->vm_flags & VM_WRITE))
5068 goto bad_area;
5069 } else {
5070 +
5071 +#ifdef CONFIG_PAX_PAGEEXEC
5072 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5073 + up_read(&mm->mmap_sem);
5074 + switch (pax_handle_fetch_fault(regs)) {
5075 +
5076 +#ifdef CONFIG_PAX_EMUPLT
5077 + case 2:
5078 + case 3:
5079 + return;
5080 +#endif
5081 +
5082 + }
5083 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5084 + do_group_exit(SIGKILL);
5085 + }
5086 +#endif
5087 +
5088 /* Allow reads even for write-only mappings */
5089 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5090 goto bad_area;
5091 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
5092 index 504c062..6fcb9c6 100644
5093 --- a/arch/sparc/mm/fault_64.c
5094 +++ b/arch/sparc/mm/fault_64.c
5095 @@ -21,6 +21,9 @@
5096 #include <linux/kprobes.h>
5097 #include <linux/kdebug.h>
5098 #include <linux/percpu.h>
5099 +#include <linux/slab.h>
5100 +#include <linux/pagemap.h>
5101 +#include <linux/compiler.h>
5102
5103 #include <asm/page.h>
5104 #include <asm/pgtable.h>
5105 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
5106 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5107 regs->tpc);
5108 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5109 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5110 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5111 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5112 dump_stack();
5113 unhandled_fault(regs->tpc, current, regs);
5114 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
5115 show_regs(regs);
5116 }
5117
5118 +#ifdef CONFIG_PAX_PAGEEXEC
5119 +#ifdef CONFIG_PAX_DLRESOLVE
5120 +static void pax_emuplt_close(struct vm_area_struct *vma)
5121 +{
5122 + vma->vm_mm->call_dl_resolve = 0UL;
5123 +}
5124 +
5125 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5126 +{
5127 + unsigned int *kaddr;
5128 +
5129 + vmf->page = alloc_page(GFP_HIGHUSER);
5130 + if (!vmf->page)
5131 + return VM_FAULT_OOM;
5132 +
5133 + kaddr = kmap(vmf->page);
5134 + memset(kaddr, 0, PAGE_SIZE);
5135 + kaddr[0] = 0x9DE3BFA8U; /* save */
5136 + flush_dcache_page(vmf->page);
5137 + kunmap(vmf->page);
5138 + return VM_FAULT_MAJOR;
5139 +}
5140 +
5141 +static const struct vm_operations_struct pax_vm_ops = {
5142 + .close = pax_emuplt_close,
5143 + .fault = pax_emuplt_fault
5144 +};
5145 +
5146 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5147 +{
5148 + int ret;
5149 +
5150 + INIT_LIST_HEAD(&vma->anon_vma_chain);
5151 + vma->vm_mm = current->mm;
5152 + vma->vm_start = addr;
5153 + vma->vm_end = addr + PAGE_SIZE;
5154 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5155 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5156 + vma->vm_ops = &pax_vm_ops;
5157 +
5158 + ret = insert_vm_struct(current->mm, vma);
5159 + if (ret)
5160 + return ret;
5161 +
5162 + ++current->mm->total_vm;
5163 + return 0;
5164 +}
5165 +#endif
5166 +
5167 +/*
5168 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5169 + *
5170 + * returns 1 when task should be killed
5171 + * 2 when patched PLT trampoline was detected
5172 + * 3 when unpatched PLT trampoline was detected
5173 + */
5174 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5175 +{
5176 +
5177 +#ifdef CONFIG_PAX_EMUPLT
5178 + int err;
5179 +
5180 + do { /* PaX: patched PLT emulation #1 */
5181 + unsigned int sethi1, sethi2, jmpl;
5182 +
5183 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5184 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5185 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5186 +
5187 + if (err)
5188 + break;
5189 +
5190 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5191 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5192 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5193 + {
5194 + unsigned long addr;
5195 +
5196 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5197 + addr = regs->u_regs[UREG_G1];
5198 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5199 +
5200 + if (test_thread_flag(TIF_32BIT))
5201 + addr &= 0xFFFFFFFFUL;
5202 +
5203 + regs->tpc = addr;
5204 + regs->tnpc = addr+4;
5205 + return 2;
5206 + }
5207 + } while (0);
5208 +
5209 + { /* PaX: patched PLT emulation #2 */
5210 + unsigned int ba;
5211 +
5212 + err = get_user(ba, (unsigned int *)regs->tpc);
5213 +
5214 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5215 + unsigned long addr;
5216 +
5217 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5218 +
5219 + if (test_thread_flag(TIF_32BIT))
5220 + addr &= 0xFFFFFFFFUL;
5221 +
5222 + regs->tpc = addr;
5223 + regs->tnpc = addr+4;
5224 + return 2;
5225 + }
5226 + }
5227 +
5228 + do { /* PaX: patched PLT emulation #3 */
5229 + unsigned int sethi, jmpl, nop;
5230 +
5231 + err = get_user(sethi, (unsigned int *)regs->tpc);
5232 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5233 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5234 +
5235 + if (err)
5236 + break;
5237 +
5238 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5239 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5240 + nop == 0x01000000U)
5241 + {
5242 + unsigned long addr;
5243 +
5244 + addr = (sethi & 0x003FFFFFU) << 10;
5245 + regs->u_regs[UREG_G1] = addr;
5246 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5247 +
5248 + if (test_thread_flag(TIF_32BIT))
5249 + addr &= 0xFFFFFFFFUL;
5250 +
5251 + regs->tpc = addr;
5252 + regs->tnpc = addr+4;
5253 + return 2;
5254 + }
5255 + } while (0);
5256 +
5257 + do { /* PaX: patched PLT emulation #4 */
5258 + unsigned int sethi, mov1, call, mov2;
5259 +
5260 + err = get_user(sethi, (unsigned int *)regs->tpc);
5261 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5262 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5263 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5264 +
5265 + if (err)
5266 + break;
5267 +
5268 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5269 + mov1 == 0x8210000FU &&
5270 + (call & 0xC0000000U) == 0x40000000U &&
5271 + mov2 == 0x9E100001U)
5272 + {
5273 + unsigned long addr;
5274 +
5275 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5276 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5277 +
5278 + if (test_thread_flag(TIF_32BIT))
5279 + addr &= 0xFFFFFFFFUL;
5280 +
5281 + regs->tpc = addr;
5282 + regs->tnpc = addr+4;
5283 + return 2;
5284 + }
5285 + } while (0);
5286 +
5287 + do { /* PaX: patched PLT emulation #5 */
5288 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5289 +
5290 + err = get_user(sethi, (unsigned int *)regs->tpc);
5291 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5292 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5293 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5294 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5295 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5296 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5297 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5298 +
5299 + if (err)
5300 + break;
5301 +
5302 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5303 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5304 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5305 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5306 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5307 + sllx == 0x83287020U &&
5308 + jmpl == 0x81C04005U &&
5309 + nop == 0x01000000U)
5310 + {
5311 + unsigned long addr;
5312 +
5313 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5314 + regs->u_regs[UREG_G1] <<= 32;
5315 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5316 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5317 + regs->tpc = addr;
5318 + regs->tnpc = addr+4;
5319 + return 2;
5320 + }
5321 + } while (0);
5322 +
5323 + do { /* PaX: patched PLT emulation #6 */
5324 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5325 +
5326 + err = get_user(sethi, (unsigned int *)regs->tpc);
5327 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5328 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5329 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5330 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5331 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5332 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5333 +
5334 + if (err)
5335 + break;
5336 +
5337 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5338 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5339 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5340 + sllx == 0x83287020U &&
5341 + (or & 0xFFFFE000U) == 0x8A116000U &&
5342 + jmpl == 0x81C04005U &&
5343 + nop == 0x01000000U)
5344 + {
5345 + unsigned long addr;
5346 +
5347 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5348 + regs->u_regs[UREG_G1] <<= 32;
5349 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5350 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5351 + regs->tpc = addr;
5352 + regs->tnpc = addr+4;
5353 + return 2;
5354 + }
5355 + } while (0);
5356 +
5357 + do { /* PaX: unpatched PLT emulation step 1 */
5358 + unsigned int sethi, ba, nop;
5359 +
5360 + err = get_user(sethi, (unsigned int *)regs->tpc);
5361 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5362 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5363 +
5364 + if (err)
5365 + break;
5366 +
5367 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5368 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5369 + nop == 0x01000000U)
5370 + {
5371 + unsigned long addr;
5372 + unsigned int save, call;
5373 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5374 +
5375 + if ((ba & 0xFFC00000U) == 0x30800000U)
5376 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5377 + else
5378 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5379 +
5380 + if (test_thread_flag(TIF_32BIT))
5381 + addr &= 0xFFFFFFFFUL;
5382 +
5383 + err = get_user(save, (unsigned int *)addr);
5384 + err |= get_user(call, (unsigned int *)(addr+4));
5385 + err |= get_user(nop, (unsigned int *)(addr+8));
5386 + if (err)
5387 + break;
5388 +
5389 +#ifdef CONFIG_PAX_DLRESOLVE
5390 + if (save == 0x9DE3BFA8U &&
5391 + (call & 0xC0000000U) == 0x40000000U &&
5392 + nop == 0x01000000U)
5393 + {
5394 + struct vm_area_struct *vma;
5395 + unsigned long call_dl_resolve;
5396 +
5397 + down_read(&current->mm->mmap_sem);
5398 + call_dl_resolve = current->mm->call_dl_resolve;
5399 + up_read(&current->mm->mmap_sem);
5400 + if (likely(call_dl_resolve))
5401 + goto emulate;
5402 +
5403 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5404 +
5405 + down_write(&current->mm->mmap_sem);
5406 + if (current->mm->call_dl_resolve) {
5407 + call_dl_resolve = current->mm->call_dl_resolve;
5408 + up_write(&current->mm->mmap_sem);
5409 + if (vma)
5410 + kmem_cache_free(vm_area_cachep, vma);
5411 + goto emulate;
5412 + }
5413 +
5414 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5415 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5416 + up_write(&current->mm->mmap_sem);
5417 + if (vma)
5418 + kmem_cache_free(vm_area_cachep, vma);
5419 + return 1;
5420 + }
5421 +
5422 + if (pax_insert_vma(vma, call_dl_resolve)) {
5423 + up_write(&current->mm->mmap_sem);
5424 + kmem_cache_free(vm_area_cachep, vma);
5425 + return 1;
5426 + }
5427 +
5428 + current->mm->call_dl_resolve = call_dl_resolve;
5429 + up_write(&current->mm->mmap_sem);
5430 +
5431 +emulate:
5432 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5433 + regs->tpc = call_dl_resolve;
5434 + regs->tnpc = addr+4;
5435 + return 3;
5436 + }
5437 +#endif
5438 +
5439 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5440 + if ((save & 0xFFC00000U) == 0x05000000U &&
5441 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5442 + nop == 0x01000000U)
5443 + {
5444 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5445 + regs->u_regs[UREG_G2] = addr + 4;
5446 + addr = (save & 0x003FFFFFU) << 10;
5447 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5448 +
5449 + if (test_thread_flag(TIF_32BIT))
5450 + addr &= 0xFFFFFFFFUL;
5451 +
5452 + regs->tpc = addr;
5453 + regs->tnpc = addr+4;
5454 + return 3;
5455 + }
5456 +
5457 + /* PaX: 64-bit PLT stub */
5458 + err = get_user(sethi1, (unsigned int *)addr);
5459 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5460 + err |= get_user(or1, (unsigned int *)(addr+8));
5461 + err |= get_user(or2, (unsigned int *)(addr+12));
5462 + err |= get_user(sllx, (unsigned int *)(addr+16));
5463 + err |= get_user(add, (unsigned int *)(addr+20));
5464 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5465 + err |= get_user(nop, (unsigned int *)(addr+28));
5466 + if (err)
5467 + break;
5468 +
5469 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5470 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5471 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5472 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5473 + sllx == 0x89293020U &&
5474 + add == 0x8A010005U &&
5475 + jmpl == 0x89C14000U &&
5476 + nop == 0x01000000U)
5477 + {
5478 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5479 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5480 + regs->u_regs[UREG_G4] <<= 32;
5481 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5482 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5483 + regs->u_regs[UREG_G4] = addr + 24;
5484 + addr = regs->u_regs[UREG_G5];
5485 + regs->tpc = addr;
5486 + regs->tnpc = addr+4;
5487 + return 3;
5488 + }
5489 + }
5490 + } while (0);
5491 +
5492 +#ifdef CONFIG_PAX_DLRESOLVE
5493 + do { /* PaX: unpatched PLT emulation step 2 */
5494 + unsigned int save, call, nop;
5495 +
5496 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5497 + err |= get_user(call, (unsigned int *)regs->tpc);
5498 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5499 + if (err)
5500 + break;
5501 +
5502 + if (save == 0x9DE3BFA8U &&
5503 + (call & 0xC0000000U) == 0x40000000U &&
5504 + nop == 0x01000000U)
5505 + {
5506 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5507 +
5508 + if (test_thread_flag(TIF_32BIT))
5509 + dl_resolve &= 0xFFFFFFFFUL;
5510 +
5511 + regs->u_regs[UREG_RETPC] = regs->tpc;
5512 + regs->tpc = dl_resolve;
5513 + regs->tnpc = dl_resolve+4;
5514 + return 3;
5515 + }
5516 + } while (0);
5517 +#endif
5518 +
5519 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5520 + unsigned int sethi, ba, nop;
5521 +
5522 + err = get_user(sethi, (unsigned int *)regs->tpc);
5523 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5524 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5525 +
5526 + if (err)
5527 + break;
5528 +
5529 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5530 + (ba & 0xFFF00000U) == 0x30600000U &&
5531 + nop == 0x01000000U)
5532 + {
5533 + unsigned long addr;
5534 +
5535 + addr = (sethi & 0x003FFFFFU) << 10;
5536 + regs->u_regs[UREG_G1] = addr;
5537 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5538 +
5539 + if (test_thread_flag(TIF_32BIT))
5540 + addr &= 0xFFFFFFFFUL;
5541 +
5542 + regs->tpc = addr;
5543 + regs->tnpc = addr+4;
5544 + return 2;
5545 + }
5546 + } while (0);
5547 +
5548 +#endif
5549 +
5550 + return 1;
5551 +}
5552 +
5553 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5554 +{
5555 + unsigned long i;
5556 +
5557 + printk(KERN_ERR "PAX: bytes at PC: ");
5558 + for (i = 0; i < 8; i++) {
5559 + unsigned int c;
5560 + if (get_user(c, (unsigned int *)pc+i))
5561 + printk(KERN_CONT "???????? ");
5562 + else
5563 + printk(KERN_CONT "%08x ", c);
5564 + }
5565 + printk("\n");
5566 +}
5567 +#endif
5568 +
5569 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5570 {
5571 struct mm_struct *mm = current->mm;
5572 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5573 if (!vma)
5574 goto bad_area;
5575
5576 +#ifdef CONFIG_PAX_PAGEEXEC
5577 + /* PaX: detect ITLB misses on non-exec pages */
5578 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5579 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5580 + {
5581 + if (address != regs->tpc)
5582 + goto good_area;
5583 +
5584 + up_read(&mm->mmap_sem);
5585 + switch (pax_handle_fetch_fault(regs)) {
5586 +
5587 +#ifdef CONFIG_PAX_EMUPLT
5588 + case 2:
5589 + case 3:
5590 + return;
5591 +#endif
5592 +
5593 + }
5594 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5595 + do_group_exit(SIGKILL);
5596 + }
5597 +#endif
5598 +
5599 /* Pure DTLB misses do not tell us whether the fault causing
5600 * load/store/atomic was a write or not, it only says that there
5601 * was no match. So in such a case we (carefully) read the
5602 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5603 index 07e1453..0a7d9e9 100644
5604 --- a/arch/sparc/mm/hugetlbpage.c
5605 +++ b/arch/sparc/mm/hugetlbpage.c
5606 @@ -67,7 +67,7 @@ full_search:
5607 }
5608 return -ENOMEM;
5609 }
5610 - if (likely(!vma || addr + len <= vma->vm_start)) {
5611 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5612 /*
5613 * Remember the place where we stopped the search:
5614 */
5615 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5616 /* make sure it can fit in the remaining address space */
5617 if (likely(addr > len)) {
5618 vma = find_vma(mm, addr-len);
5619 - if (!vma || addr <= vma->vm_start) {
5620 + if (check_heap_stack_gap(vma, addr - len, len)) {
5621 /* remember the address as a hint for next time */
5622 return (mm->free_area_cache = addr-len);
5623 }
5624 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5625 if (unlikely(mm->mmap_base < len))
5626 goto bottomup;
5627
5628 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5629 + addr = mm->mmap_base - len;
5630
5631 do {
5632 + addr &= HPAGE_MASK;
5633 /*
5634 * Lookup failure means no vma is above this address,
5635 * else if new region fits below vma->vm_start,
5636 * return with success:
5637 */
5638 vma = find_vma(mm, addr);
5639 - if (likely(!vma || addr+len <= vma->vm_start)) {
5640 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5641 /* remember the address as a hint for next time */
5642 return (mm->free_area_cache = addr);
5643 }
5644 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5645 mm->cached_hole_size = vma->vm_start - addr;
5646
5647 /* try just below the current vma->vm_start */
5648 - addr = (vma->vm_start-len) & HPAGE_MASK;
5649 - } while (likely(len < vma->vm_start));
5650 + addr = skip_heap_stack_gap(vma, len);
5651 + } while (!IS_ERR_VALUE(addr));
5652
5653 bottomup:
5654 /*
5655 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5656 if (addr) {
5657 addr = ALIGN(addr, HPAGE_SIZE);
5658 vma = find_vma(mm, addr);
5659 - if (task_size - len >= addr &&
5660 - (!vma || addr + len <= vma->vm_start))
5661 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5662 return addr;
5663 }
5664 if (mm->get_unmapped_area == arch_get_unmapped_area)
5665 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5666 index 7b00de6..78239f4 100644
5667 --- a/arch/sparc/mm/init_32.c
5668 +++ b/arch/sparc/mm/init_32.c
5669 @@ -316,6 +316,9 @@ extern void device_scan(void);
5670 pgprot_t PAGE_SHARED __read_mostly;
5671 EXPORT_SYMBOL(PAGE_SHARED);
5672
5673 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5674 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5675 +
5676 void __init paging_init(void)
5677 {
5678 switch(sparc_cpu_model) {
5679 @@ -344,17 +347,17 @@ void __init paging_init(void)
5680
5681 /* Initialize the protection map with non-constant, MMU dependent values. */
5682 protection_map[0] = PAGE_NONE;
5683 - protection_map[1] = PAGE_READONLY;
5684 - protection_map[2] = PAGE_COPY;
5685 - protection_map[3] = PAGE_COPY;
5686 + protection_map[1] = PAGE_READONLY_NOEXEC;
5687 + protection_map[2] = PAGE_COPY_NOEXEC;
5688 + protection_map[3] = PAGE_COPY_NOEXEC;
5689 protection_map[4] = PAGE_READONLY;
5690 protection_map[5] = PAGE_READONLY;
5691 protection_map[6] = PAGE_COPY;
5692 protection_map[7] = PAGE_COPY;
5693 protection_map[8] = PAGE_NONE;
5694 - protection_map[9] = PAGE_READONLY;
5695 - protection_map[10] = PAGE_SHARED;
5696 - protection_map[11] = PAGE_SHARED;
5697 + protection_map[9] = PAGE_READONLY_NOEXEC;
5698 + protection_map[10] = PAGE_SHARED_NOEXEC;
5699 + protection_map[11] = PAGE_SHARED_NOEXEC;
5700 protection_map[12] = PAGE_READONLY;
5701 protection_map[13] = PAGE_READONLY;
5702 protection_map[14] = PAGE_SHARED;
5703 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5704 index cbef74e..c38fead 100644
5705 --- a/arch/sparc/mm/srmmu.c
5706 +++ b/arch/sparc/mm/srmmu.c
5707 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5708 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5709 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5710 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5711 +
5712 +#ifdef CONFIG_PAX_PAGEEXEC
5713 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5714 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5715 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5716 +#endif
5717 +
5718 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5719 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5720
5721 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
5722 index 27fe667..36d474c 100644
5723 --- a/arch/tile/include/asm/atomic_64.h
5724 +++ b/arch/tile/include/asm/atomic_64.h
5725 @@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5726
5727 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5728
5729 +#define atomic64_read_unchecked(v) atomic64_read(v)
5730 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5731 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5732 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5733 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5734 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
5735 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5736 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
5737 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5738 +
5739 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
5740 #define smp_mb__before_atomic_dec() smp_mb()
5741 #define smp_mb__after_atomic_dec() smp_mb()
5742 diff --git a/arch/um/Makefile b/arch/um/Makefile
5743 index 7730af6..cce5b19 100644
5744 --- a/arch/um/Makefile
5745 +++ b/arch/um/Makefile
5746 @@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5747 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5748 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
5749
5750 +ifdef CONSTIFY_PLUGIN
5751 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5752 +endif
5753 +
5754 #This will adjust *FLAGS accordingly to the platform.
5755 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
5756
5757 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5758 index 6c03acd..a5e0215 100644
5759 --- a/arch/um/include/asm/kmap_types.h
5760 +++ b/arch/um/include/asm/kmap_types.h
5761 @@ -23,6 +23,7 @@ enum km_type {
5762 KM_IRQ1,
5763 KM_SOFTIRQ0,
5764 KM_SOFTIRQ1,
5765 + KM_CLEARPAGE,
5766 KM_TYPE_NR
5767 };
5768
5769 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5770 index 7cfc3ce..cbd1a58 100644
5771 --- a/arch/um/include/asm/page.h
5772 +++ b/arch/um/include/asm/page.h
5773 @@ -14,6 +14,9 @@
5774 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5775 #define PAGE_MASK (~(PAGE_SIZE-1))
5776
5777 +#define ktla_ktva(addr) (addr)
5778 +#define ktva_ktla(addr) (addr)
5779 +
5780 #ifndef __ASSEMBLY__
5781
5782 struct page;
5783 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5784 index c533835..84db18e 100644
5785 --- a/arch/um/kernel/process.c
5786 +++ b/arch/um/kernel/process.c
5787 @@ -406,22 +406,6 @@ int singlestepping(void * t)
5788 return 2;
5789 }
5790
5791 -/*
5792 - * Only x86 and x86_64 have an arch_align_stack().
5793 - * All other arches have "#define arch_align_stack(x) (x)"
5794 - * in their asm/system.h
5795 - * As this is included in UML from asm-um/system-generic.h,
5796 - * we can use it to behave as the subarch does.
5797 - */
5798 -#ifndef arch_align_stack
5799 -unsigned long arch_align_stack(unsigned long sp)
5800 -{
5801 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5802 - sp -= get_random_int() % 8192;
5803 - return sp & ~0xf;
5804 -}
5805 -#endif
5806 -
5807 unsigned long get_wchan(struct task_struct *p)
5808 {
5809 unsigned long stack_page, sp, ip;
5810 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5811 index efb4294..61bc18c 100644
5812 --- a/arch/x86/Kconfig
5813 +++ b/arch/x86/Kconfig
5814 @@ -235,7 +235,7 @@ config X86_HT
5815
5816 config X86_32_LAZY_GS
5817 def_bool y
5818 - depends on X86_32 && !CC_STACKPROTECTOR
5819 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5820
5821 config ARCH_HWEIGHT_CFLAGS
5822 string
5823 @@ -1022,7 +1022,7 @@ choice
5824
5825 config NOHIGHMEM
5826 bool "off"
5827 - depends on !X86_NUMAQ
5828 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5829 ---help---
5830 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5831 However, the address space of 32-bit x86 processors is only 4
5832 @@ -1059,7 +1059,7 @@ config NOHIGHMEM
5833
5834 config HIGHMEM4G
5835 bool "4GB"
5836 - depends on !X86_NUMAQ
5837 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5838 ---help---
5839 Select this if you have a 32-bit processor and between 1 and 4
5840 gigabytes of physical RAM.
5841 @@ -1113,7 +1113,7 @@ config PAGE_OFFSET
5842 hex
5843 default 0xB0000000 if VMSPLIT_3G_OPT
5844 default 0x80000000 if VMSPLIT_2G
5845 - default 0x78000000 if VMSPLIT_2G_OPT
5846 + default 0x70000000 if VMSPLIT_2G_OPT
5847 default 0x40000000 if VMSPLIT_1G
5848 default 0xC0000000
5849 depends on X86_32
5850 @@ -1496,6 +1496,7 @@ config SECCOMP
5851
5852 config CC_STACKPROTECTOR
5853 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5854 + depends on X86_64 || !PAX_MEMORY_UDEREF
5855 ---help---
5856 This option turns on the -fstack-protector GCC feature. This
5857 feature puts, at the beginning of functions, a canary value on
5858 @@ -1553,6 +1554,7 @@ config KEXEC_JUMP
5859 config PHYSICAL_START
5860 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5861 default "0x1000000"
5862 + range 0x400000 0x40000000
5863 ---help---
5864 This gives the physical address where the kernel is loaded.
5865
5866 @@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
5867 config PHYSICAL_ALIGN
5868 hex "Alignment value to which kernel should be aligned" if X86_32
5869 default "0x1000000"
5870 + range 0x400000 0x1000000 if PAX_KERNEXEC
5871 range 0x2000 0x1000000
5872 ---help---
5873 This value puts the alignment restrictions on physical address
5874 @@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
5875 Say N if you want to disable CPU hotplug.
5876
5877 config COMPAT_VDSO
5878 - def_bool y
5879 + def_bool n
5880 prompt "Compat VDSO support"
5881 depends on X86_32 || IA32_EMULATION
5882 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5883 ---help---
5884 Map the 32-bit VDSO to the predictable old-style address too.
5885
5886 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5887 index e3ca7e0..b30b28a 100644
5888 --- a/arch/x86/Kconfig.cpu
5889 +++ b/arch/x86/Kconfig.cpu
5890 @@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5891
5892 config X86_F00F_BUG
5893 def_bool y
5894 - depends on M586MMX || M586TSC || M586 || M486 || M386
5895 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5896
5897 config X86_INVD_BUG
5898 def_bool y
5899 @@ -365,7 +365,7 @@ config X86_POPAD_OK
5900
5901 config X86_ALIGNMENT_16
5902 def_bool y
5903 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5904 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5905
5906 config X86_INTEL_USERCOPY
5907 def_bool y
5908 @@ -411,7 +411,7 @@ config X86_CMPXCHG64
5909 # generates cmov.
5910 config X86_CMOV
5911 def_bool y
5912 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5913 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5914
5915 config X86_MINIMUM_CPU_FAMILY
5916 int
5917 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5918 index bf56e17..05f9891 100644
5919 --- a/arch/x86/Kconfig.debug
5920 +++ b/arch/x86/Kconfig.debug
5921 @@ -81,7 +81,7 @@ config X86_PTDUMP
5922 config DEBUG_RODATA
5923 bool "Write protect kernel read-only data structures"
5924 default y
5925 - depends on DEBUG_KERNEL
5926 + depends on DEBUG_KERNEL && BROKEN
5927 ---help---
5928 Mark the kernel read-only data as write-protected in the pagetables,
5929 in order to catch accidental (and incorrect) writes to such const
5930 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5931
5932 config DEBUG_SET_MODULE_RONX
5933 bool "Set loadable kernel module data as NX and text as RO"
5934 - depends on MODULES
5935 + depends on MODULES && BROKEN
5936 ---help---
5937 This option helps catch unintended modifications to loadable
5938 kernel module's text and read-only data. It also prevents execution
5939 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5940 index b02e509..2631e48 100644
5941 --- a/arch/x86/Makefile
5942 +++ b/arch/x86/Makefile
5943 @@ -46,6 +46,7 @@ else
5944 UTS_MACHINE := x86_64
5945 CHECKFLAGS += -D__x86_64__ -m64
5946
5947 + biarch := $(call cc-option,-m64)
5948 KBUILD_AFLAGS += -m64
5949 KBUILD_CFLAGS += -m64
5950
5951 @@ -195,3 +196,12 @@ define archhelp
5952 echo ' FDARGS="..." arguments for the booted kernel'
5953 echo ' FDINITRD=file initrd for the booted kernel'
5954 endef
5955 +
5956 +define OLD_LD
5957 +
5958 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5959 +*** Please upgrade your binutils to 2.18 or newer
5960 +endef
5961 +
5962 +archprepare:
5963 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5964 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5965 index 95365a8..52f857b 100644
5966 --- a/arch/x86/boot/Makefile
5967 +++ b/arch/x86/boot/Makefile
5968 @@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5969 $(call cc-option, -fno-stack-protector) \
5970 $(call cc-option, -mpreferred-stack-boundary=2)
5971 KBUILD_CFLAGS += $(call cc-option, -m32)
5972 +ifdef CONSTIFY_PLUGIN
5973 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5974 +endif
5975 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5976 GCOV_PROFILE := n
5977
5978 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5979 index 878e4b9..20537ab 100644
5980 --- a/arch/x86/boot/bitops.h
5981 +++ b/arch/x86/boot/bitops.h
5982 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5983 u8 v;
5984 const u32 *p = (const u32 *)addr;
5985
5986 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5987 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5988 return v;
5989 }
5990
5991 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5992
5993 static inline void set_bit(int nr, void *addr)
5994 {
5995 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5996 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5997 }
5998
5999 #endif /* BOOT_BITOPS_H */
6000 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
6001 index c7093bd..d4247ffe0 100644
6002 --- a/arch/x86/boot/boot.h
6003 +++ b/arch/x86/boot/boot.h
6004 @@ -85,7 +85,7 @@ static inline void io_delay(void)
6005 static inline u16 ds(void)
6006 {
6007 u16 seg;
6008 - asm("movw %%ds,%0" : "=rm" (seg));
6009 + asm volatile("movw %%ds,%0" : "=rm" (seg));
6010 return seg;
6011 }
6012
6013 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
6014 static inline int memcmp(const void *s1, const void *s2, size_t len)
6015 {
6016 u8 diff;
6017 - asm("repe; cmpsb; setnz %0"
6018 + asm volatile("repe; cmpsb; setnz %0"
6019 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6020 return diff;
6021 }
6022 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
6023 index 09664ef..edc5d03 100644
6024 --- a/arch/x86/boot/compressed/Makefile
6025 +++ b/arch/x86/boot/compressed/Makefile
6026 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
6027 KBUILD_CFLAGS += $(cflags-y)
6028 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6029 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6030 +ifdef CONSTIFY_PLUGIN
6031 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6032 +endif
6033
6034 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6035 GCOV_PROFILE := n
6036 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
6037 index 67a655a..b924059 100644
6038 --- a/arch/x86/boot/compressed/head_32.S
6039 +++ b/arch/x86/boot/compressed/head_32.S
6040 @@ -76,7 +76,7 @@ ENTRY(startup_32)
6041 notl %eax
6042 andl %eax, %ebx
6043 #else
6044 - movl $LOAD_PHYSICAL_ADDR, %ebx
6045 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6046 #endif
6047
6048 /* Target address to relocate to for decompression */
6049 @@ -162,7 +162,7 @@ relocated:
6050 * and where it was actually loaded.
6051 */
6052 movl %ebp, %ebx
6053 - subl $LOAD_PHYSICAL_ADDR, %ebx
6054 + subl $____LOAD_PHYSICAL_ADDR, %ebx
6055 jz 2f /* Nothing to be done if loaded at compiled addr. */
6056 /*
6057 * Process relocations.
6058 @@ -170,8 +170,7 @@ relocated:
6059
6060 1: subl $4, %edi
6061 movl (%edi), %ecx
6062 - testl %ecx, %ecx
6063 - jz 2f
6064 + jecxz 2f
6065 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6066 jmp 1b
6067 2:
6068 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
6069 index 35af09d..99c9676 100644
6070 --- a/arch/x86/boot/compressed/head_64.S
6071 +++ b/arch/x86/boot/compressed/head_64.S
6072 @@ -91,7 +91,7 @@ ENTRY(startup_32)
6073 notl %eax
6074 andl %eax, %ebx
6075 #else
6076 - movl $LOAD_PHYSICAL_ADDR, %ebx
6077 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6078 #endif
6079
6080 /* Target address to relocate to for decompression */
6081 @@ -233,7 +233,7 @@ ENTRY(startup_64)
6082 notq %rax
6083 andq %rax, %rbp
6084 #else
6085 - movq $LOAD_PHYSICAL_ADDR, %rbp
6086 + movq $____LOAD_PHYSICAL_ADDR, %rbp
6087 #endif
6088
6089 /* Target address to relocate to for decompression */
6090 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
6091 index 3a19d04..7c1d55a 100644
6092 --- a/arch/x86/boot/compressed/misc.c
6093 +++ b/arch/x86/boot/compressed/misc.c
6094 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
6095 case PT_LOAD:
6096 #ifdef CONFIG_RELOCATABLE
6097 dest = output;
6098 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6099 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6100 #else
6101 dest = (void *)(phdr->p_paddr);
6102 #endif
6103 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
6104 error("Destination address too large");
6105 #endif
6106 #ifndef CONFIG_RELOCATABLE
6107 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6108 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6109 error("Wrong destination address");
6110 #endif
6111
6112 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
6113 index 89bbf4e..869908e 100644
6114 --- a/arch/x86/boot/compressed/relocs.c
6115 +++ b/arch/x86/boot/compressed/relocs.c
6116 @@ -13,8 +13,11 @@
6117
6118 static void die(char *fmt, ...);
6119
6120 +#include "../../../../include/generated/autoconf.h"
6121 +
6122 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6123 static Elf32_Ehdr ehdr;
6124 +static Elf32_Phdr *phdr;
6125 static unsigned long reloc_count, reloc_idx;
6126 static unsigned long *relocs;
6127
6128 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
6129 }
6130 }
6131
6132 +static void read_phdrs(FILE *fp)
6133 +{
6134 + unsigned int i;
6135 +
6136 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6137 + if (!phdr) {
6138 + die("Unable to allocate %d program headers\n",
6139 + ehdr.e_phnum);
6140 + }
6141 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6142 + die("Seek to %d failed: %s\n",
6143 + ehdr.e_phoff, strerror(errno));
6144 + }
6145 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6146 + die("Cannot read ELF program headers: %s\n",
6147 + strerror(errno));
6148 + }
6149 + for(i = 0; i < ehdr.e_phnum; i++) {
6150 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6151 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6152 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6153 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6154 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6155 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6156 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6157 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6158 + }
6159 +
6160 +}
6161 +
6162 static void read_shdrs(FILE *fp)
6163 {
6164 - int i;
6165 + unsigned int i;
6166 Elf32_Shdr shdr;
6167
6168 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6169 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
6170
6171 static void read_strtabs(FILE *fp)
6172 {
6173 - int i;
6174 + unsigned int i;
6175 for (i = 0; i < ehdr.e_shnum; i++) {
6176 struct section *sec = &secs[i];
6177 if (sec->shdr.sh_type != SHT_STRTAB) {
6178 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
6179
6180 static void read_symtabs(FILE *fp)
6181 {
6182 - int i,j;
6183 + unsigned int i,j;
6184 for (i = 0; i < ehdr.e_shnum; i++) {
6185 struct section *sec = &secs[i];
6186 if (sec->shdr.sh_type != SHT_SYMTAB) {
6187 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
6188
6189 static void read_relocs(FILE *fp)
6190 {
6191 - int i,j;
6192 + unsigned int i,j;
6193 + uint32_t base;
6194 +
6195 for (i = 0; i < ehdr.e_shnum; i++) {
6196 struct section *sec = &secs[i];
6197 if (sec->shdr.sh_type != SHT_REL) {
6198 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
6199 die("Cannot read symbol table: %s\n",
6200 strerror(errno));
6201 }
6202 + base = 0;
6203 + for (j = 0; j < ehdr.e_phnum; j++) {
6204 + if (phdr[j].p_type != PT_LOAD )
6205 + continue;
6206 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6207 + continue;
6208 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6209 + break;
6210 + }
6211 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6212 Elf32_Rel *rel = &sec->reltab[j];
6213 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6214 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6215 rel->r_info = elf32_to_cpu(rel->r_info);
6216 }
6217 }
6218 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
6219
6220 static void print_absolute_symbols(void)
6221 {
6222 - int i;
6223 + unsigned int i;
6224 printf("Absolute symbols\n");
6225 printf(" Num: Value Size Type Bind Visibility Name\n");
6226 for (i = 0; i < ehdr.e_shnum; i++) {
6227 struct section *sec = &secs[i];
6228 char *sym_strtab;
6229 Elf32_Sym *sh_symtab;
6230 - int j;
6231 + unsigned int j;
6232
6233 if (sec->shdr.sh_type != SHT_SYMTAB) {
6234 continue;
6235 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
6236
6237 static void print_absolute_relocs(void)
6238 {
6239 - int i, printed = 0;
6240 + unsigned int i, printed = 0;
6241
6242 for (i = 0; i < ehdr.e_shnum; i++) {
6243 struct section *sec = &secs[i];
6244 struct section *sec_applies, *sec_symtab;
6245 char *sym_strtab;
6246 Elf32_Sym *sh_symtab;
6247 - int j;
6248 + unsigned int j;
6249 if (sec->shdr.sh_type != SHT_REL) {
6250 continue;
6251 }
6252 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6253
6254 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6255 {
6256 - int i;
6257 + unsigned int i;
6258 /* Walk through the relocations */
6259 for (i = 0; i < ehdr.e_shnum; i++) {
6260 char *sym_strtab;
6261 Elf32_Sym *sh_symtab;
6262 struct section *sec_applies, *sec_symtab;
6263 - int j;
6264 + unsigned int j;
6265 struct section *sec = &secs[i];
6266
6267 if (sec->shdr.sh_type != SHT_REL) {
6268 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6269 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6270 continue;
6271 }
6272 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6273 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6274 + continue;
6275 +
6276 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6277 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6278 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6279 + continue;
6280 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6281 + continue;
6282 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6283 + continue;
6284 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6285 + continue;
6286 +#endif
6287 +
6288 switch (r_type) {
6289 case R_386_NONE:
6290 case R_386_PC32:
6291 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6292
6293 static void emit_relocs(int as_text)
6294 {
6295 - int i;
6296 + unsigned int i;
6297 /* Count how many relocations I have and allocate space for them. */
6298 reloc_count = 0;
6299 walk_relocs(count_reloc);
6300 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
6301 fname, strerror(errno));
6302 }
6303 read_ehdr(fp);
6304 + read_phdrs(fp);
6305 read_shdrs(fp);
6306 read_strtabs(fp);
6307 read_symtabs(fp);
6308 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6309 index 4d3ff03..e4972ff 100644
6310 --- a/arch/x86/boot/cpucheck.c
6311 +++ b/arch/x86/boot/cpucheck.c
6312 @@ -74,7 +74,7 @@ static int has_fpu(void)
6313 u16 fcw = -1, fsw = -1;
6314 u32 cr0;
6315
6316 - asm("movl %%cr0,%0" : "=r" (cr0));
6317 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6318 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6319 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6320 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6321 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6322 {
6323 u32 f0, f1;
6324
6325 - asm("pushfl ; "
6326 + asm volatile("pushfl ; "
6327 "pushfl ; "
6328 "popl %0 ; "
6329 "movl %0,%1 ; "
6330 @@ -115,7 +115,7 @@ static void get_flags(void)
6331 set_bit(X86_FEATURE_FPU, cpu.flags);
6332
6333 if (has_eflag(X86_EFLAGS_ID)) {
6334 - asm("cpuid"
6335 + asm volatile("cpuid"
6336 : "=a" (max_intel_level),
6337 "=b" (cpu_vendor[0]),
6338 "=d" (cpu_vendor[1]),
6339 @@ -124,7 +124,7 @@ static void get_flags(void)
6340
6341 if (max_intel_level >= 0x00000001 &&
6342 max_intel_level <= 0x0000ffff) {
6343 - asm("cpuid"
6344 + asm volatile("cpuid"
6345 : "=a" (tfms),
6346 "=c" (cpu.flags[4]),
6347 "=d" (cpu.flags[0])
6348 @@ -136,7 +136,7 @@ static void get_flags(void)
6349 cpu.model += ((tfms >> 16) & 0xf) << 4;
6350 }
6351
6352 - asm("cpuid"
6353 + asm volatile("cpuid"
6354 : "=a" (max_amd_level)
6355 : "a" (0x80000000)
6356 : "ebx", "ecx", "edx");
6357 @@ -144,7 +144,7 @@ static void get_flags(void)
6358 if (max_amd_level >= 0x80000001 &&
6359 max_amd_level <= 0x8000ffff) {
6360 u32 eax = 0x80000001;
6361 - asm("cpuid"
6362 + asm volatile("cpuid"
6363 : "+a" (eax),
6364 "=c" (cpu.flags[6]),
6365 "=d" (cpu.flags[1])
6366 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6367 u32 ecx = MSR_K7_HWCR;
6368 u32 eax, edx;
6369
6370 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6371 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6372 eax &= ~(1 << 15);
6373 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6374 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6375
6376 get_flags(); /* Make sure it really did something */
6377 err = check_flags();
6378 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6379 u32 ecx = MSR_VIA_FCR;
6380 u32 eax, edx;
6381
6382 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6383 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6384 eax |= (1<<1)|(1<<7);
6385 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6386 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6387
6388 set_bit(X86_FEATURE_CX8, cpu.flags);
6389 err = check_flags();
6390 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6391 u32 eax, edx;
6392 u32 level = 1;
6393
6394 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6395 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6396 - asm("cpuid"
6397 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6398 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6399 + asm volatile("cpuid"
6400 : "+a" (level), "=d" (cpu.flags[0])
6401 : : "ecx", "ebx");
6402 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6403 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6404
6405 err = check_flags();
6406 }
6407 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6408 index bdb4d45..0476680 100644
6409 --- a/arch/x86/boot/header.S
6410 +++ b/arch/x86/boot/header.S
6411 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6412 # single linked list of
6413 # struct setup_data
6414
6415 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6416 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6417
6418 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6419 #define VO_INIT_SIZE (VO__end - VO__text)
6420 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6421 index db75d07..8e6d0af 100644
6422 --- a/arch/x86/boot/memory.c
6423 +++ b/arch/x86/boot/memory.c
6424 @@ -19,7 +19,7 @@
6425
6426 static int detect_memory_e820(void)
6427 {
6428 - int count = 0;
6429 + unsigned int count = 0;
6430 struct biosregs ireg, oreg;
6431 struct e820entry *desc = boot_params.e820_map;
6432 static struct e820entry buf; /* static so it is zeroed */
6433 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6434 index 11e8c6e..fdbb1ed 100644
6435 --- a/arch/x86/boot/video-vesa.c
6436 +++ b/arch/x86/boot/video-vesa.c
6437 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6438
6439 boot_params.screen_info.vesapm_seg = oreg.es;
6440 boot_params.screen_info.vesapm_off = oreg.di;
6441 + boot_params.screen_info.vesapm_size = oreg.cx;
6442 }
6443
6444 /*
6445 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6446 index 43eda28..5ab5fdb 100644
6447 --- a/arch/x86/boot/video.c
6448 +++ b/arch/x86/boot/video.c
6449 @@ -96,7 +96,7 @@ static void store_mode_params(void)
6450 static unsigned int get_entry(void)
6451 {
6452 char entry_buf[4];
6453 - int i, len = 0;
6454 + unsigned int i, len = 0;
6455 int key;
6456 unsigned int v;
6457
6458 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6459 index 5b577d5..3c1fed4 100644
6460 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
6461 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6462 @@ -8,6 +8,8 @@
6463 * including this sentence is retained in full.
6464 */
6465
6466 +#include <asm/alternative-asm.h>
6467 +
6468 .extern crypto_ft_tab
6469 .extern crypto_it_tab
6470 .extern crypto_fl_tab
6471 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6472 je B192; \
6473 leaq 32(r9),r9;
6474
6475 +#define ret pax_force_retaddr 0, 1; ret
6476 +
6477 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6478 movq r1,r2; \
6479 movq r3,r4; \
6480 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6481 index be6d9e3..21fbbca 100644
6482 --- a/arch/x86/crypto/aesni-intel_asm.S
6483 +++ b/arch/x86/crypto/aesni-intel_asm.S
6484 @@ -31,6 +31,7 @@
6485
6486 #include <linux/linkage.h>
6487 #include <asm/inst.h>
6488 +#include <asm/alternative-asm.h>
6489
6490 #ifdef __x86_64__
6491 .data
6492 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6493 pop %r14
6494 pop %r13
6495 pop %r12
6496 + pax_force_retaddr 0, 1
6497 ret
6498 +ENDPROC(aesni_gcm_dec)
6499
6500
6501 /*****************************************************************************
6502 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6503 pop %r14
6504 pop %r13
6505 pop %r12
6506 + pax_force_retaddr 0, 1
6507 ret
6508 +ENDPROC(aesni_gcm_enc)
6509
6510 #endif
6511
6512 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
6513 pxor %xmm1, %xmm0
6514 movaps %xmm0, (TKEYP)
6515 add $0x10, TKEYP
6516 + pax_force_retaddr_bts
6517 ret
6518
6519 .align 4
6520 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
6521 shufps $0b01001110, %xmm2, %xmm1
6522 movaps %xmm1, 0x10(TKEYP)
6523 add $0x20, TKEYP
6524 + pax_force_retaddr_bts
6525 ret
6526
6527 .align 4
6528 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
6529
6530 movaps %xmm0, (TKEYP)
6531 add $0x10, TKEYP
6532 + pax_force_retaddr_bts
6533 ret
6534
6535 .align 4
6536 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
6537 pxor %xmm1, %xmm2
6538 movaps %xmm2, (TKEYP)
6539 add $0x10, TKEYP
6540 + pax_force_retaddr_bts
6541 ret
6542
6543 /*
6544 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6545 #ifndef __x86_64__
6546 popl KEYP
6547 #endif
6548 + pax_force_retaddr 0, 1
6549 ret
6550 +ENDPROC(aesni_set_key)
6551
6552 /*
6553 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6554 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6555 popl KLEN
6556 popl KEYP
6557 #endif
6558 + pax_force_retaddr 0, 1
6559 ret
6560 +ENDPROC(aesni_enc)
6561
6562 /*
6563 * _aesni_enc1: internal ABI
6564 @@ -1959,6 +1972,7 @@ _aesni_enc1:
6565 AESENC KEY STATE
6566 movaps 0x70(TKEYP), KEY
6567 AESENCLAST KEY STATE
6568 + pax_force_retaddr_bts
6569 ret
6570
6571 /*
6572 @@ -2067,6 +2081,7 @@ _aesni_enc4:
6573 AESENCLAST KEY STATE2
6574 AESENCLAST KEY STATE3
6575 AESENCLAST KEY STATE4
6576 + pax_force_retaddr_bts
6577 ret
6578
6579 /*
6580 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6581 popl KLEN
6582 popl KEYP
6583 #endif
6584 + pax_force_retaddr 0, 1
6585 ret
6586 +ENDPROC(aesni_dec)
6587
6588 /*
6589 * _aesni_dec1: internal ABI
6590 @@ -2146,6 +2163,7 @@ _aesni_dec1:
6591 AESDEC KEY STATE
6592 movaps 0x70(TKEYP), KEY
6593 AESDECLAST KEY STATE
6594 + pax_force_retaddr_bts
6595 ret
6596
6597 /*
6598 @@ -2254,6 +2272,7 @@ _aesni_dec4:
6599 AESDECLAST KEY STATE2
6600 AESDECLAST KEY STATE3
6601 AESDECLAST KEY STATE4
6602 + pax_force_retaddr_bts
6603 ret
6604
6605 /*
6606 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6607 popl KEYP
6608 popl LEN
6609 #endif
6610 + pax_force_retaddr 0, 1
6611 ret
6612 +ENDPROC(aesni_ecb_enc)
6613
6614 /*
6615 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6616 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6617 popl KEYP
6618 popl LEN
6619 #endif
6620 + pax_force_retaddr 0, 1
6621 ret
6622 +ENDPROC(aesni_ecb_dec)
6623
6624 /*
6625 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6626 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6627 popl LEN
6628 popl IVP
6629 #endif
6630 + pax_force_retaddr 0, 1
6631 ret
6632 +ENDPROC(aesni_cbc_enc)
6633
6634 /*
6635 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6636 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6637 popl LEN
6638 popl IVP
6639 #endif
6640 + pax_force_retaddr 0, 1
6641 ret
6642 +ENDPROC(aesni_cbc_dec)
6643
6644 #ifdef __x86_64__
6645 .align 16
6646 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
6647 mov $1, TCTR_LOW
6648 MOVQ_R64_XMM TCTR_LOW INC
6649 MOVQ_R64_XMM CTR TCTR_LOW
6650 + pax_force_retaddr_bts
6651 ret
6652
6653 /*
6654 @@ -2552,6 +2580,7 @@ _aesni_inc:
6655 .Linc_low:
6656 movaps CTR, IV
6657 PSHUFB_XMM BSWAP_MASK IV
6658 + pax_force_retaddr_bts
6659 ret
6660
6661 /*
6662 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6663 .Lctr_enc_ret:
6664 movups IV, (IVP)
6665 .Lctr_enc_just_ret:
6666 + pax_force_retaddr 0, 1
6667 ret
6668 +ENDPROC(aesni_ctr_enc)
6669 #endif
6670 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6671 index 391d245..67f35c2 100644
6672 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
6673 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6674 @@ -20,6 +20,8 @@
6675 *
6676 */
6677
6678 +#include <asm/alternative-asm.h>
6679 +
6680 .file "blowfish-x86_64-asm.S"
6681 .text
6682
6683 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
6684 jnz __enc_xor;
6685
6686 write_block();
6687 + pax_force_retaddr 0, 1
6688 ret;
6689 __enc_xor:
6690 xor_block();
6691 + pax_force_retaddr 0, 1
6692 ret;
6693
6694 .align 8
6695 @@ -188,6 +192,7 @@ blowfish_dec_blk:
6696
6697 movq %r11, %rbp;
6698
6699 + pax_force_retaddr 0, 1
6700 ret;
6701
6702 /**********************************************************************
6703 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
6704
6705 popq %rbx;
6706 popq %rbp;
6707 + pax_force_retaddr 0, 1
6708 ret;
6709
6710 __enc_xor4:
6711 @@ -349,6 +355,7 @@ __enc_xor4:
6712
6713 popq %rbx;
6714 popq %rbp;
6715 + pax_force_retaddr 0, 1
6716 ret;
6717
6718 .align 8
6719 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
6720 popq %rbx;
6721 popq %rbp;
6722
6723 + pax_force_retaddr 0, 1
6724 ret;
6725
6726 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6727 index 6214a9b..1f4fc9a 100644
6728 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6729 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6730 @@ -1,3 +1,5 @@
6731 +#include <asm/alternative-asm.h>
6732 +
6733 # enter ECRYPT_encrypt_bytes
6734 .text
6735 .p2align 5
6736 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6737 add %r11,%rsp
6738 mov %rdi,%rax
6739 mov %rsi,%rdx
6740 + pax_force_retaddr 0, 1
6741 ret
6742 # bytesatleast65:
6743 ._bytesatleast65:
6744 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
6745 add %r11,%rsp
6746 mov %rdi,%rax
6747 mov %rsi,%rdx
6748 + pax_force_retaddr
6749 ret
6750 # enter ECRYPT_ivsetup
6751 .text
6752 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6753 add %r11,%rsp
6754 mov %rdi,%rax
6755 mov %rsi,%rdx
6756 + pax_force_retaddr
6757 ret
6758 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
6759 index b2c2f57..8470cab 100644
6760 --- a/arch/x86/crypto/sha1_ssse3_asm.S
6761 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
6762 @@ -28,6 +28,8 @@
6763 * (at your option) any later version.
6764 */
6765
6766 +#include <asm/alternative-asm.h>
6767 +
6768 #define CTX %rdi // arg1
6769 #define BUF %rsi // arg2
6770 #define CNT %rdx // arg3
6771 @@ -104,6 +106,7 @@
6772 pop %r12
6773 pop %rbp
6774 pop %rbx
6775 + pax_force_retaddr 0, 1
6776 ret
6777
6778 .size \name, .-\name
6779 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6780 index 5b012a2..36d5364 100644
6781 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6782 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6783 @@ -20,6 +20,8 @@
6784 *
6785 */
6786
6787 +#include <asm/alternative-asm.h>
6788 +
6789 .file "twofish-x86_64-asm-3way.S"
6790 .text
6791
6792 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
6793 popq %r13;
6794 popq %r14;
6795 popq %r15;
6796 + pax_force_retaddr 0, 1
6797 ret;
6798
6799 __enc_xor3:
6800 @@ -271,6 +274,7 @@ __enc_xor3:
6801 popq %r13;
6802 popq %r14;
6803 popq %r15;
6804 + pax_force_retaddr 0, 1
6805 ret;
6806
6807 .global twofish_dec_blk_3way
6808 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
6809 popq %r13;
6810 popq %r14;
6811 popq %r15;
6812 + pax_force_retaddr 0, 1
6813 ret;
6814
6815 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6816 index 7bcf3fc..f53832f 100644
6817 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6818 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6819 @@ -21,6 +21,7 @@
6820 .text
6821
6822 #include <asm/asm-offsets.h>
6823 +#include <asm/alternative-asm.h>
6824
6825 #define a_offset 0
6826 #define b_offset 4
6827 @@ -268,6 +269,7 @@ twofish_enc_blk:
6828
6829 popq R1
6830 movq $1,%rax
6831 + pax_force_retaddr 0, 1
6832 ret
6833
6834 twofish_dec_blk:
6835 @@ -319,4 +321,5 @@ twofish_dec_blk:
6836
6837 popq R1
6838 movq $1,%rax
6839 + pax_force_retaddr 0, 1
6840 ret
6841 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6842 index fd84387..0b4af7d 100644
6843 --- a/arch/x86/ia32/ia32_aout.c
6844 +++ b/arch/x86/ia32/ia32_aout.c
6845 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6846 unsigned long dump_start, dump_size;
6847 struct user32 dump;
6848
6849 + memset(&dump, 0, sizeof(dump));
6850 +
6851 fs = get_fs();
6852 set_fs(KERNEL_DS);
6853 has_dumped = 1;
6854 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6855 index 6557769..ef6ae89 100644
6856 --- a/arch/x86/ia32/ia32_signal.c
6857 +++ b/arch/x86/ia32/ia32_signal.c
6858 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6859 }
6860 seg = get_fs();
6861 set_fs(KERNEL_DS);
6862 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6863 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6864 set_fs(seg);
6865 if (ret >= 0 && uoss_ptr) {
6866 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6867 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6868 */
6869 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6870 size_t frame_size,
6871 - void **fpstate)
6872 + void __user **fpstate)
6873 {
6874 unsigned long sp;
6875
6876 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6877
6878 if (used_math()) {
6879 sp = sp - sig_xstate_ia32_size;
6880 - *fpstate = (struct _fpstate_ia32 *) sp;
6881 + *fpstate = (struct _fpstate_ia32 __user *) sp;
6882 if (save_i387_xstate_ia32(*fpstate) < 0)
6883 return (void __user *) -1L;
6884 }
6885 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6886 sp -= frame_size;
6887 /* Align the stack pointer according to the i386 ABI,
6888 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6889 - sp = ((sp + 4) & -16ul) - 4;
6890 + sp = ((sp - 12) & -16ul) - 4;
6891 return (void __user *) sp;
6892 }
6893
6894 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6895 * These are actually not used anymore, but left because some
6896 * gdb versions depend on them as a marker.
6897 */
6898 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6899 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6900 } put_user_catch(err);
6901
6902 if (err)
6903 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6904 0xb8,
6905 __NR_ia32_rt_sigreturn,
6906 0x80cd,
6907 - 0,
6908 + 0
6909 };
6910
6911 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6912 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6913
6914 if (ka->sa.sa_flags & SA_RESTORER)
6915 restorer = ka->sa.sa_restorer;
6916 + else if (current->mm->context.vdso)
6917 + /* Return stub is in 32bit vsyscall page */
6918 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6919 else
6920 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6921 - rt_sigreturn);
6922 + restorer = &frame->retcode;
6923 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6924
6925 /*
6926 * Not actually used anymore, but left because some gdb
6927 * versions need it.
6928 */
6929 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6930 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6931 } put_user_catch(err);
6932
6933 if (err)
6934 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6935 index a6253ec..4ad2120 100644
6936 --- a/arch/x86/ia32/ia32entry.S
6937 +++ b/arch/x86/ia32/ia32entry.S
6938 @@ -13,7 +13,9 @@
6939 #include <asm/thread_info.h>
6940 #include <asm/segment.h>
6941 #include <asm/irqflags.h>
6942 +#include <asm/pgtable.h>
6943 #include <linux/linkage.h>
6944 +#include <asm/alternative-asm.h>
6945
6946 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6947 #include <linux/elf-em.h>
6948 @@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
6949 ENDPROC(native_irq_enable_sysexit)
6950 #endif
6951
6952 + .macro pax_enter_kernel_user
6953 + pax_set_fptr_mask
6954 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6955 + call pax_enter_kernel_user
6956 +#endif
6957 + .endm
6958 +
6959 + .macro pax_exit_kernel_user
6960 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6961 + call pax_exit_kernel_user
6962 +#endif
6963 +#ifdef CONFIG_PAX_RANDKSTACK
6964 + pushq %rax
6965 + pushq %r11
6966 + call pax_randomize_kstack
6967 + popq %r11
6968 + popq %rax
6969 +#endif
6970 + .endm
6971 +
6972 +.macro pax_erase_kstack
6973 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6974 + call pax_erase_kstack
6975 +#endif
6976 +.endm
6977 +
6978 /*
6979 * 32bit SYSENTER instruction entry.
6980 *
6981 @@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
6982 CFI_REGISTER rsp,rbp
6983 SWAPGS_UNSAFE_STACK
6984 movq PER_CPU_VAR(kernel_stack), %rsp
6985 - addq $(KERNEL_STACK_OFFSET),%rsp
6986 - /*
6987 - * No need to follow this irqs on/off section: the syscall
6988 - * disabled irqs, here we enable it straight after entry:
6989 - */
6990 - ENABLE_INTERRUPTS(CLBR_NONE)
6991 movl %ebp,%ebp /* zero extension */
6992 pushq_cfi $__USER32_DS
6993 /*CFI_REL_OFFSET ss,0*/
6994 @@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
6995 CFI_REL_OFFSET rsp,0
6996 pushfq_cfi
6997 /*CFI_REL_OFFSET rflags,0*/
6998 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6999 - CFI_REGISTER rip,r10
7000 + orl $X86_EFLAGS_IF,(%rsp)
7001 + GET_THREAD_INFO(%r11)
7002 + movl TI_sysenter_return(%r11), %r11d
7003 + CFI_REGISTER rip,r11
7004 pushq_cfi $__USER32_CS
7005 /*CFI_REL_OFFSET cs,0*/
7006 movl %eax, %eax
7007 - pushq_cfi %r10
7008 + pushq_cfi %r11
7009 CFI_REL_OFFSET rip,0
7010 pushq_cfi %rax
7011 cld
7012 SAVE_ARGS 0,1,0
7013 + pax_enter_kernel_user
7014 + /*
7015 + * No need to follow this irqs on/off section: the syscall
7016 + * disabled irqs, here we enable it straight after entry:
7017 + */
7018 + ENABLE_INTERRUPTS(CLBR_NONE)
7019 /* no need to do an access_ok check here because rbp has been
7020 32bit zero extended */
7021 +
7022 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7023 + mov $PAX_USER_SHADOW_BASE,%r11
7024 + add %r11,%rbp
7025 +#endif
7026 +
7027 1: movl (%rbp),%ebp
7028 .section __ex_table,"a"
7029 .quad 1b,ia32_badarg
7030 .previous
7031 - GET_THREAD_INFO(%r10)
7032 - orl $TS_COMPAT,TI_status(%r10)
7033 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7034 + GET_THREAD_INFO(%r11)
7035 + orl $TS_COMPAT,TI_status(%r11)
7036 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7037 CFI_REMEMBER_STATE
7038 jnz sysenter_tracesys
7039 cmpq $(IA32_NR_syscalls-1),%rax
7040 @@ -162,13 +198,15 @@ sysenter_do_call:
7041 sysenter_dispatch:
7042 call *ia32_sys_call_table(,%rax,8)
7043 movq %rax,RAX-ARGOFFSET(%rsp)
7044 - GET_THREAD_INFO(%r10)
7045 + GET_THREAD_INFO(%r11)
7046 DISABLE_INTERRUPTS(CLBR_NONE)
7047 TRACE_IRQS_OFF
7048 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7049 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7050 jnz sysexit_audit
7051 sysexit_from_sys_call:
7052 - andl $~TS_COMPAT,TI_status(%r10)
7053 + pax_exit_kernel_user
7054 + pax_erase_kstack
7055 + andl $~TS_COMPAT,TI_status(%r11)
7056 /* clear IF, that popfq doesn't enable interrupts early */
7057 andl $~0x200,EFLAGS-R11(%rsp)
7058 movl RIP-R11(%rsp),%edx /* User %eip */
7059 @@ -194,6 +232,9 @@ sysexit_from_sys_call:
7060 movl %eax,%esi /* 2nd arg: syscall number */
7061 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
7062 call audit_syscall_entry
7063 +
7064 + pax_erase_kstack
7065 +
7066 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
7067 cmpq $(IA32_NR_syscalls-1),%rax
7068 ja ia32_badsys
7069 @@ -205,7 +246,7 @@ sysexit_from_sys_call:
7070 .endm
7071
7072 .macro auditsys_exit exit
7073 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7074 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7075 jnz ia32_ret_from_sys_call
7076 TRACE_IRQS_ON
7077 sti
7078 @@ -215,12 +256,12 @@ sysexit_from_sys_call:
7079 movzbl %al,%edi /* zero-extend that into %edi */
7080 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
7081 call audit_syscall_exit
7082 - GET_THREAD_INFO(%r10)
7083 + GET_THREAD_INFO(%r11)
7084 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
7085 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
7086 cli
7087 TRACE_IRQS_OFF
7088 - testl %edi,TI_flags(%r10)
7089 + testl %edi,TI_flags(%r11)
7090 jz \exit
7091 CLEAR_RREGS -ARGOFFSET
7092 jmp int_with_check
7093 @@ -238,7 +279,7 @@ sysexit_audit:
7094
7095 sysenter_tracesys:
7096 #ifdef CONFIG_AUDITSYSCALL
7097 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7098 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7099 jz sysenter_auditsys
7100 #endif
7101 SAVE_REST
7102 @@ -246,6 +287,9 @@ sysenter_tracesys:
7103 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
7104 movq %rsp,%rdi /* &pt_regs -> arg1 */
7105 call syscall_trace_enter
7106 +
7107 + pax_erase_kstack
7108 +
7109 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
7110 RESTORE_REST
7111 cmpq $(IA32_NR_syscalls-1),%rax
7112 @@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
7113 ENTRY(ia32_cstar_target)
7114 CFI_STARTPROC32 simple
7115 CFI_SIGNAL_FRAME
7116 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
7117 + CFI_DEF_CFA rsp,0
7118 CFI_REGISTER rip,rcx
7119 /*CFI_REGISTER rflags,r11*/
7120 SWAPGS_UNSAFE_STACK
7121 movl %esp,%r8d
7122 CFI_REGISTER rsp,r8
7123 movq PER_CPU_VAR(kernel_stack),%rsp
7124 + SAVE_ARGS 8*6,0,0
7125 + pax_enter_kernel_user
7126 /*
7127 * No need to follow this irqs on/off section: the syscall
7128 * disabled irqs and here we enable it straight after entry:
7129 */
7130 ENABLE_INTERRUPTS(CLBR_NONE)
7131 - SAVE_ARGS 8,0,0
7132 movl %eax,%eax /* zero extension */
7133 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
7134 movq %rcx,RIP-ARGOFFSET(%rsp)
7135 @@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
7136 /* no need to do an access_ok check here because r8 has been
7137 32bit zero extended */
7138 /* hardware stack frame is complete now */
7139 +
7140 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7141 + mov $PAX_USER_SHADOW_BASE,%r11
7142 + add %r11,%r8
7143 +#endif
7144 +
7145 1: movl (%r8),%r9d
7146 .section __ex_table,"a"
7147 .quad 1b,ia32_badarg
7148 .previous
7149 - GET_THREAD_INFO(%r10)
7150 - orl $TS_COMPAT,TI_status(%r10)
7151 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7152 + GET_THREAD_INFO(%r11)
7153 + orl $TS_COMPAT,TI_status(%r11)
7154 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7155 CFI_REMEMBER_STATE
7156 jnz cstar_tracesys
7157 cmpq $IA32_NR_syscalls-1,%rax
7158 @@ -321,13 +372,15 @@ cstar_do_call:
7159 cstar_dispatch:
7160 call *ia32_sys_call_table(,%rax,8)
7161 movq %rax,RAX-ARGOFFSET(%rsp)
7162 - GET_THREAD_INFO(%r10)
7163 + GET_THREAD_INFO(%r11)
7164 DISABLE_INTERRUPTS(CLBR_NONE)
7165 TRACE_IRQS_OFF
7166 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7167 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7168 jnz sysretl_audit
7169 sysretl_from_sys_call:
7170 - andl $~TS_COMPAT,TI_status(%r10)
7171 + pax_exit_kernel_user
7172 + pax_erase_kstack
7173 + andl $~TS_COMPAT,TI_status(%r11)
7174 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
7175 movl RIP-ARGOFFSET(%rsp),%ecx
7176 CFI_REGISTER rip,rcx
7177 @@ -355,7 +408,7 @@ sysretl_audit:
7178
7179 cstar_tracesys:
7180 #ifdef CONFIG_AUDITSYSCALL
7181 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7182 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7183 jz cstar_auditsys
7184 #endif
7185 xchgl %r9d,%ebp
7186 @@ -364,6 +417,9 @@ cstar_tracesys:
7187 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
7188 movq %rsp,%rdi /* &pt_regs -> arg1 */
7189 call syscall_trace_enter
7190 +
7191 + pax_erase_kstack
7192 +
7193 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
7194 RESTORE_REST
7195 xchgl %ebp,%r9d
7196 @@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
7197 CFI_REL_OFFSET rip,RIP-RIP
7198 PARAVIRT_ADJUST_EXCEPTION_FRAME
7199 SWAPGS
7200 - /*
7201 - * No need to follow this irqs on/off section: the syscall
7202 - * disabled irqs and here we enable it straight after entry:
7203 - */
7204 - ENABLE_INTERRUPTS(CLBR_NONE)
7205 movl %eax,%eax
7206 pushq_cfi %rax
7207 cld
7208 /* note the registers are not zero extended to the sf.
7209 this could be a problem. */
7210 SAVE_ARGS 0,1,0
7211 - GET_THREAD_INFO(%r10)
7212 - orl $TS_COMPAT,TI_status(%r10)
7213 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7214 + pax_enter_kernel_user
7215 + /*
7216 + * No need to follow this irqs on/off section: the syscall
7217 + * disabled irqs and here we enable it straight after entry:
7218 + */
7219 + ENABLE_INTERRUPTS(CLBR_NONE)
7220 + GET_THREAD_INFO(%r11)
7221 + orl $TS_COMPAT,TI_status(%r11)
7222 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7223 jnz ia32_tracesys
7224 cmpq $(IA32_NR_syscalls-1),%rax
7225 ja ia32_badsys
7226 @@ -441,6 +498,9 @@ ia32_tracesys:
7227 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
7228 movq %rsp,%rdi /* &pt_regs -> arg1 */
7229 call syscall_trace_enter
7230 +
7231 + pax_erase_kstack
7232 +
7233 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
7234 RESTORE_REST
7235 cmpq $(IA32_NR_syscalls-1),%rax
7236 @@ -455,6 +515,7 @@ ia32_badsys:
7237
7238 quiet_ni_syscall:
7239 movq $-ENOSYS,%rax
7240 + pax_force_retaddr
7241 ret
7242 CFI_ENDPROC
7243
7244 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
7245 index f6f5c53..b358b28 100644
7246 --- a/arch/x86/ia32/sys_ia32.c
7247 +++ b/arch/x86/ia32/sys_ia32.c
7248 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
7249 */
7250 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
7251 {
7252 - typeof(ubuf->st_uid) uid = 0;
7253 - typeof(ubuf->st_gid) gid = 0;
7254 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
7255 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
7256 SET_UID(uid, stat->uid);
7257 SET_GID(gid, stat->gid);
7258 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
7259 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
7260 }
7261 set_fs(KERNEL_DS);
7262 ret = sys_rt_sigprocmask(how,
7263 - set ? (sigset_t __user *)&s : NULL,
7264 - oset ? (sigset_t __user *)&s : NULL,
7265 + set ? (sigset_t __force_user *)&s : NULL,
7266 + oset ? (sigset_t __force_user *)&s : NULL,
7267 sigsetsize);
7268 set_fs(old_fs);
7269 if (ret)
7270 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
7271 return alarm_setitimer(seconds);
7272 }
7273
7274 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
7275 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
7276 int options)
7277 {
7278 return compat_sys_wait4(pid, stat_addr, options, NULL);
7279 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
7280 mm_segment_t old_fs = get_fs();
7281
7282 set_fs(KERNEL_DS);
7283 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
7284 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
7285 set_fs(old_fs);
7286 if (put_compat_timespec(&t, interval))
7287 return -EFAULT;
7288 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
7289 mm_segment_t old_fs = get_fs();
7290
7291 set_fs(KERNEL_DS);
7292 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
7293 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
7294 set_fs(old_fs);
7295 if (!ret) {
7296 switch (_NSIG_WORDS) {
7297 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
7298 if (copy_siginfo_from_user32(&info, uinfo))
7299 return -EFAULT;
7300 set_fs(KERNEL_DS);
7301 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
7302 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
7303 set_fs(old_fs);
7304 return ret;
7305 }
7306 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7307 return -EFAULT;
7308
7309 set_fs(KERNEL_DS);
7310 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7311 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7312 count);
7313 set_fs(old_fs);
7314
7315 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7316 index 091508b..7692c6f 100644
7317 --- a/arch/x86/include/asm/alternative-asm.h
7318 +++ b/arch/x86/include/asm/alternative-asm.h
7319 @@ -4,10 +4,10 @@
7320
7321 #ifdef CONFIG_SMP
7322 .macro LOCK_PREFIX
7323 -1: lock
7324 +672: lock
7325 .section .smp_locks,"a"
7326 .balign 4
7327 - .long 1b - .
7328 + .long 672b - .
7329 .previous
7330 .endm
7331 #else
7332 @@ -15,6 +15,45 @@
7333 .endm
7334 #endif
7335
7336 +#ifdef KERNEXEC_PLUGIN
7337 + .macro pax_force_retaddr_bts rip=0
7338 + btsq $63,\rip(%rsp)
7339 + .endm
7340 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7341 + .macro pax_force_retaddr rip=0, reload=0
7342 + btsq $63,\rip(%rsp)
7343 + .endm
7344 + .macro pax_force_fptr ptr
7345 + btsq $63,\ptr
7346 + .endm
7347 + .macro pax_set_fptr_mask
7348 + .endm
7349 +#endif
7350 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7351 + .macro pax_force_retaddr rip=0, reload=0
7352 + .if \reload
7353 + pax_set_fptr_mask
7354 + .endif
7355 + orq %r10,\rip(%rsp)
7356 + .endm
7357 + .macro pax_force_fptr ptr
7358 + orq %r10,\ptr
7359 + .endm
7360 + .macro pax_set_fptr_mask
7361 + movabs $0x8000000000000000,%r10
7362 + .endm
7363 +#endif
7364 +#else
7365 + .macro pax_force_retaddr rip=0, reload=0
7366 + .endm
7367 + .macro pax_force_fptr ptr
7368 + .endm
7369 + .macro pax_force_retaddr_bts rip=0
7370 + .endm
7371 + .macro pax_set_fptr_mask
7372 + .endm
7373 +#endif
7374 +
7375 .macro altinstruction_entry orig alt feature orig_len alt_len
7376 .long \orig - .
7377 .long \alt - .
7378 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7379 index 37ad100..7d47faa 100644
7380 --- a/arch/x86/include/asm/alternative.h
7381 +++ b/arch/x86/include/asm/alternative.h
7382 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7383 ".section .discard,\"aw\",@progbits\n" \
7384 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7385 ".previous\n" \
7386 - ".section .altinstr_replacement, \"ax\"\n" \
7387 + ".section .altinstr_replacement, \"a\"\n" \
7388 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7389 ".previous"
7390
7391 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7392 index 1a6c09a..fec2432 100644
7393 --- a/arch/x86/include/asm/apic.h
7394 +++ b/arch/x86/include/asm/apic.h
7395 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7396
7397 #ifdef CONFIG_X86_LOCAL_APIC
7398
7399 -extern unsigned int apic_verbosity;
7400 +extern int apic_verbosity;
7401 extern int local_apic_timer_c2_ok;
7402
7403 extern int disable_apic;
7404 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7405 index 20370c6..a2eb9b0 100644
7406 --- a/arch/x86/include/asm/apm.h
7407 +++ b/arch/x86/include/asm/apm.h
7408 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7409 __asm__ __volatile__(APM_DO_ZERO_SEGS
7410 "pushl %%edi\n\t"
7411 "pushl %%ebp\n\t"
7412 - "lcall *%%cs:apm_bios_entry\n\t"
7413 + "lcall *%%ss:apm_bios_entry\n\t"
7414 "setc %%al\n\t"
7415 "popl %%ebp\n\t"
7416 "popl %%edi\n\t"
7417 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7418 __asm__ __volatile__(APM_DO_ZERO_SEGS
7419 "pushl %%edi\n\t"
7420 "pushl %%ebp\n\t"
7421 - "lcall *%%cs:apm_bios_entry\n\t"
7422 + "lcall *%%ss:apm_bios_entry\n\t"
7423 "setc %%bl\n\t"
7424 "popl %%ebp\n\t"
7425 "popl %%edi\n\t"
7426 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7427 index 58cb6d4..ca9010d 100644
7428 --- a/arch/x86/include/asm/atomic.h
7429 +++ b/arch/x86/include/asm/atomic.h
7430 @@ -22,7 +22,18 @@
7431 */
7432 static inline int atomic_read(const atomic_t *v)
7433 {
7434 - return (*(volatile int *)&(v)->counter);
7435 + return (*(volatile const int *)&(v)->counter);
7436 +}
7437 +
7438 +/**
7439 + * atomic_read_unchecked - read atomic variable
7440 + * @v: pointer of type atomic_unchecked_t
7441 + *
7442 + * Atomically reads the value of @v.
7443 + */
7444 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7445 +{
7446 + return (*(volatile const int *)&(v)->counter);
7447 }
7448
7449 /**
7450 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7451 }
7452
7453 /**
7454 + * atomic_set_unchecked - set atomic variable
7455 + * @v: pointer of type atomic_unchecked_t
7456 + * @i: required value
7457 + *
7458 + * Atomically sets the value of @v to @i.
7459 + */
7460 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7461 +{
7462 + v->counter = i;
7463 +}
7464 +
7465 +/**
7466 * atomic_add - add integer to atomic variable
7467 * @i: integer value to add
7468 * @v: pointer of type atomic_t
7469 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7470 */
7471 static inline void atomic_add(int i, atomic_t *v)
7472 {
7473 - asm volatile(LOCK_PREFIX "addl %1,%0"
7474 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7475 +
7476 +#ifdef CONFIG_PAX_REFCOUNT
7477 + "jno 0f\n"
7478 + LOCK_PREFIX "subl %1,%0\n"
7479 + "int $4\n0:\n"
7480 + _ASM_EXTABLE(0b, 0b)
7481 +#endif
7482 +
7483 + : "+m" (v->counter)
7484 + : "ir" (i));
7485 +}
7486 +
7487 +/**
7488 + * atomic_add_unchecked - add integer to atomic variable
7489 + * @i: integer value to add
7490 + * @v: pointer of type atomic_unchecked_t
7491 + *
7492 + * Atomically adds @i to @v.
7493 + */
7494 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7495 +{
7496 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7497 : "+m" (v->counter)
7498 : "ir" (i));
7499 }
7500 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7501 */
7502 static inline void atomic_sub(int i, atomic_t *v)
7503 {
7504 - asm volatile(LOCK_PREFIX "subl %1,%0"
7505 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7506 +
7507 +#ifdef CONFIG_PAX_REFCOUNT
7508 + "jno 0f\n"
7509 + LOCK_PREFIX "addl %1,%0\n"
7510 + "int $4\n0:\n"
7511 + _ASM_EXTABLE(0b, 0b)
7512 +#endif
7513 +
7514 + : "+m" (v->counter)
7515 + : "ir" (i));
7516 +}
7517 +
7518 +/**
7519 + * atomic_sub_unchecked - subtract integer from atomic variable
7520 + * @i: integer value to subtract
7521 + * @v: pointer of type atomic_unchecked_t
7522 + *
7523 + * Atomically subtracts @i from @v.
7524 + */
7525 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7526 +{
7527 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7528 : "+m" (v->counter)
7529 : "ir" (i));
7530 }
7531 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7532 {
7533 unsigned char c;
7534
7535 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7536 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7537 +
7538 +#ifdef CONFIG_PAX_REFCOUNT
7539 + "jno 0f\n"
7540 + LOCK_PREFIX "addl %2,%0\n"
7541 + "int $4\n0:\n"
7542 + _ASM_EXTABLE(0b, 0b)
7543 +#endif
7544 +
7545 + "sete %1\n"
7546 : "+m" (v->counter), "=qm" (c)
7547 : "ir" (i) : "memory");
7548 return c;
7549 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7550 */
7551 static inline void atomic_inc(atomic_t *v)
7552 {
7553 - asm volatile(LOCK_PREFIX "incl %0"
7554 + asm volatile(LOCK_PREFIX "incl %0\n"
7555 +
7556 +#ifdef CONFIG_PAX_REFCOUNT
7557 + "jno 0f\n"
7558 + LOCK_PREFIX "decl %0\n"
7559 + "int $4\n0:\n"
7560 + _ASM_EXTABLE(0b, 0b)
7561 +#endif
7562 +
7563 + : "+m" (v->counter));
7564 +}
7565 +
7566 +/**
7567 + * atomic_inc_unchecked - increment atomic variable
7568 + * @v: pointer of type atomic_unchecked_t
7569 + *
7570 + * Atomically increments @v by 1.
7571 + */
7572 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7573 +{
7574 + asm volatile(LOCK_PREFIX "incl %0\n"
7575 : "+m" (v->counter));
7576 }
7577
7578 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7579 */
7580 static inline void atomic_dec(atomic_t *v)
7581 {
7582 - asm volatile(LOCK_PREFIX "decl %0"
7583 + asm volatile(LOCK_PREFIX "decl %0\n"
7584 +
7585 +#ifdef CONFIG_PAX_REFCOUNT
7586 + "jno 0f\n"
7587 + LOCK_PREFIX "incl %0\n"
7588 + "int $4\n0:\n"
7589 + _ASM_EXTABLE(0b, 0b)
7590 +#endif
7591 +
7592 + : "+m" (v->counter));
7593 +}
7594 +
7595 +/**
7596 + * atomic_dec_unchecked - decrement atomic variable
7597 + * @v: pointer of type atomic_unchecked_t
7598 + *
7599 + * Atomically decrements @v by 1.
7600 + */
7601 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7602 +{
7603 + asm volatile(LOCK_PREFIX "decl %0\n"
7604 : "+m" (v->counter));
7605 }
7606
7607 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7608 {
7609 unsigned char c;
7610
7611 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7612 + asm volatile(LOCK_PREFIX "decl %0\n"
7613 +
7614 +#ifdef CONFIG_PAX_REFCOUNT
7615 + "jno 0f\n"
7616 + LOCK_PREFIX "incl %0\n"
7617 + "int $4\n0:\n"
7618 + _ASM_EXTABLE(0b, 0b)
7619 +#endif
7620 +
7621 + "sete %1\n"
7622 : "+m" (v->counter), "=qm" (c)
7623 : : "memory");
7624 return c != 0;
7625 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7626 {
7627 unsigned char c;
7628
7629 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7630 + asm volatile(LOCK_PREFIX "incl %0\n"
7631 +
7632 +#ifdef CONFIG_PAX_REFCOUNT
7633 + "jno 0f\n"
7634 + LOCK_PREFIX "decl %0\n"
7635 + "int $4\n0:\n"
7636 + _ASM_EXTABLE(0b, 0b)
7637 +#endif
7638 +
7639 + "sete %1\n"
7640 + : "+m" (v->counter), "=qm" (c)
7641 + : : "memory");
7642 + return c != 0;
7643 +}
7644 +
7645 +/**
7646 + * atomic_inc_and_test_unchecked - increment and test
7647 + * @v: pointer of type atomic_unchecked_t
7648 + *
7649 + * Atomically increments @v by 1
7650 + * and returns true if the result is zero, or false for all
7651 + * other cases.
7652 + */
7653 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7654 +{
7655 + unsigned char c;
7656 +
7657 + asm volatile(LOCK_PREFIX "incl %0\n"
7658 + "sete %1\n"
7659 : "+m" (v->counter), "=qm" (c)
7660 : : "memory");
7661 return c != 0;
7662 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7663 {
7664 unsigned char c;
7665
7666 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7667 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7668 +
7669 +#ifdef CONFIG_PAX_REFCOUNT
7670 + "jno 0f\n"
7671 + LOCK_PREFIX "subl %2,%0\n"
7672 + "int $4\n0:\n"
7673 + _ASM_EXTABLE(0b, 0b)
7674 +#endif
7675 +
7676 + "sets %1\n"
7677 : "+m" (v->counter), "=qm" (c)
7678 : "ir" (i) : "memory");
7679 return c;
7680 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
7681 goto no_xadd;
7682 #endif
7683 /* Modern 486+ processor */
7684 - return i + xadd(&v->counter, i);
7685 + return i + xadd_check_overflow(&v->counter, i);
7686
7687 #ifdef CONFIG_M386
7688 no_xadd: /* Legacy 386 processor */
7689 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
7690 }
7691
7692 /**
7693 + * atomic_add_return_unchecked - add integer and return
7694 + * @i: integer value to add
7695 + * @v: pointer of type atomic_unchecked_t
7696 + *
7697 + * Atomically adds @i to @v and returns @i + @v
7698 + */
7699 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7700 +{
7701 +#ifdef CONFIG_M386
7702 + int __i;
7703 + unsigned long flags;
7704 + if (unlikely(boot_cpu_data.x86 <= 3))
7705 + goto no_xadd;
7706 +#endif
7707 + /* Modern 486+ processor */
7708 + return i + xadd(&v->counter, i);
7709 +
7710 +#ifdef CONFIG_M386
7711 +no_xadd: /* Legacy 386 processor */
7712 + raw_local_irq_save(flags);
7713 + __i = atomic_read_unchecked(v);
7714 + atomic_set_unchecked(v, i + __i);
7715 + raw_local_irq_restore(flags);
7716 + return i + __i;
7717 +#endif
7718 +}
7719 +
7720 +/**
7721 * atomic_sub_return - subtract integer and return
7722 * @v: pointer of type atomic_t
7723 * @i: integer value to subtract
7724 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7725 }
7726
7727 #define atomic_inc_return(v) (atomic_add_return(1, v))
7728 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7729 +{
7730 + return atomic_add_return_unchecked(1, v);
7731 +}
7732 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7733
7734 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7735 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7736 return cmpxchg(&v->counter, old, new);
7737 }
7738
7739 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7740 +{
7741 + return cmpxchg(&v->counter, old, new);
7742 +}
7743 +
7744 static inline int atomic_xchg(atomic_t *v, int new)
7745 {
7746 return xchg(&v->counter, new);
7747 }
7748
7749 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7750 +{
7751 + return xchg(&v->counter, new);
7752 +}
7753 +
7754 /**
7755 * __atomic_add_unless - add unless the number is already a given value
7756 * @v: pointer of type atomic_t
7757 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7758 */
7759 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7760 {
7761 - int c, old;
7762 + int c, old, new;
7763 c = atomic_read(v);
7764 for (;;) {
7765 - if (unlikely(c == (u)))
7766 + if (unlikely(c == u))
7767 break;
7768 - old = atomic_cmpxchg((v), c, c + (a));
7769 +
7770 + asm volatile("addl %2,%0\n"
7771 +
7772 +#ifdef CONFIG_PAX_REFCOUNT
7773 + "jno 0f\n"
7774 + "subl %2,%0\n"
7775 + "int $4\n0:\n"
7776 + _ASM_EXTABLE(0b, 0b)
7777 +#endif
7778 +
7779 + : "=r" (new)
7780 + : "0" (c), "ir" (a));
7781 +
7782 + old = atomic_cmpxchg(v, c, new);
7783 if (likely(old == c))
7784 break;
7785 c = old;
7786 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7787 return c;
7788 }
7789
7790 +/**
7791 + * atomic_inc_not_zero_hint - increment if not null
7792 + * @v: pointer of type atomic_t
7793 + * @hint: probable value of the atomic before the increment
7794 + *
7795 + * This version of atomic_inc_not_zero() gives a hint of probable
7796 + * value of the atomic. This helps processor to not read the memory
7797 + * before doing the atomic read/modify/write cycle, lowering
7798 + * number of bus transactions on some arches.
7799 + *
7800 + * Returns: 0 if increment was not done, 1 otherwise.
7801 + */
7802 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7803 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7804 +{
7805 + int val, c = hint, new;
7806 +
7807 + /* sanity test, should be removed by compiler if hint is a constant */
7808 + if (!hint)
7809 + return __atomic_add_unless(v, 1, 0);
7810 +
7811 + do {
7812 + asm volatile("incl %0\n"
7813 +
7814 +#ifdef CONFIG_PAX_REFCOUNT
7815 + "jno 0f\n"
7816 + "decl %0\n"
7817 + "int $4\n0:\n"
7818 + _ASM_EXTABLE(0b, 0b)
7819 +#endif
7820 +
7821 + : "=r" (new)
7822 + : "0" (c));
7823 +
7824 + val = atomic_cmpxchg(v, c, new);
7825 + if (val == c)
7826 + return 1;
7827 + c = val;
7828 + } while (c);
7829 +
7830 + return 0;
7831 +}
7832
7833 /*
7834 * atomic_dec_if_positive - decrement by 1 if old value positive
7835 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7836 index 24098aa..1e37723 100644
7837 --- a/arch/x86/include/asm/atomic64_32.h
7838 +++ b/arch/x86/include/asm/atomic64_32.h
7839 @@ -12,6 +12,14 @@ typedef struct {
7840 u64 __aligned(8) counter;
7841 } atomic64_t;
7842
7843 +#ifdef CONFIG_PAX_REFCOUNT
7844 +typedef struct {
7845 + u64 __aligned(8) counter;
7846 +} atomic64_unchecked_t;
7847 +#else
7848 +typedef atomic64_t atomic64_unchecked_t;
7849 +#endif
7850 +
7851 #define ATOMIC64_INIT(val) { (val) }
7852
7853 #ifdef CONFIG_X86_CMPXCHG64
7854 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7855 }
7856
7857 /**
7858 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7859 + * @p: pointer to type atomic64_unchecked_t
7860 + * @o: expected value
7861 + * @n: new value
7862 + *
7863 + * Atomically sets @v to @n if it was equal to @o and returns
7864 + * the old value.
7865 + */
7866 +
7867 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7868 +{
7869 + return cmpxchg64(&v->counter, o, n);
7870 +}
7871 +
7872 +/**
7873 * atomic64_xchg - xchg atomic64 variable
7874 * @v: pointer to type atomic64_t
7875 * @n: value to assign
7876 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7877 }
7878
7879 /**
7880 + * atomic64_set_unchecked - set atomic64 variable
7881 + * @v: pointer to type atomic64_unchecked_t
7882 + * @n: value to assign
7883 + *
7884 + * Atomically sets the value of @v to @n.
7885 + */
7886 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7887 +{
7888 + unsigned high = (unsigned)(i >> 32);
7889 + unsigned low = (unsigned)i;
7890 + asm volatile(ATOMIC64_ALTERNATIVE(set)
7891 + : "+b" (low), "+c" (high)
7892 + : "S" (v)
7893 + : "eax", "edx", "memory"
7894 + );
7895 +}
7896 +
7897 +/**
7898 * atomic64_read - read atomic64 variable
7899 * @v: pointer to type atomic64_t
7900 *
7901 @@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7902 }
7903
7904 /**
7905 + * atomic64_read_unchecked - read atomic64 variable
7906 + * @v: pointer to type atomic64_unchecked_t
7907 + *
7908 + * Atomically reads the value of @v and returns it.
7909 + */
7910 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7911 +{
7912 + long long r;
7913 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7914 + : "=A" (r), "+c" (v)
7915 + : : "memory"
7916 + );
7917 + return r;
7918 + }
7919 +
7920 +/**
7921 * atomic64_add_return - add and return
7922 * @i: integer value to add
7923 * @v: pointer to type atomic64_t
7924 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7925 return i;
7926 }
7927
7928 +/**
7929 + * atomic64_add_return_unchecked - add and return
7930 + * @i: integer value to add
7931 + * @v: pointer to type atomic64_unchecked_t
7932 + *
7933 + * Atomically adds @i to @v and returns @i + *@v
7934 + */
7935 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7936 +{
7937 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7938 + : "+A" (i), "+c" (v)
7939 + : : "memory"
7940 + );
7941 + return i;
7942 +}
7943 +
7944 /*
7945 * Other variants with different arithmetic operators:
7946 */
7947 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7948 return a;
7949 }
7950
7951 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7952 +{
7953 + long long a;
7954 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7955 + : "=A" (a)
7956 + : "S" (v)
7957 + : "memory", "ecx"
7958 + );
7959 + return a;
7960 +}
7961 +
7962 static inline long long atomic64_dec_return(atomic64_t *v)
7963 {
7964 long long a;
7965 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7966 }
7967
7968 /**
7969 + * atomic64_add_unchecked - add integer to atomic64 variable
7970 + * @i: integer value to add
7971 + * @v: pointer to type atomic64_unchecked_t
7972 + *
7973 + * Atomically adds @i to @v.
7974 + */
7975 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7976 +{
7977 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7978 + : "+A" (i), "+c" (v)
7979 + : : "memory"
7980 + );
7981 + return i;
7982 +}
7983 +
7984 +/**
7985 * atomic64_sub - subtract the atomic64 variable
7986 * @i: integer value to subtract
7987 * @v: pointer to type atomic64_t
7988 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7989 index 0e1cbfc..5623683 100644
7990 --- a/arch/x86/include/asm/atomic64_64.h
7991 +++ b/arch/x86/include/asm/atomic64_64.h
7992 @@ -18,7 +18,19 @@
7993 */
7994 static inline long atomic64_read(const atomic64_t *v)
7995 {
7996 - return (*(volatile long *)&(v)->counter);
7997 + return (*(volatile const long *)&(v)->counter);
7998 +}
7999 +
8000 +/**
8001 + * atomic64_read_unchecked - read atomic64 variable
8002 + * @v: pointer of type atomic64_unchecked_t
8003 + *
8004 + * Atomically reads the value of @v.
8005 + * Doesn't imply a read memory barrier.
8006 + */
8007 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8008 +{
8009 + return (*(volatile const long *)&(v)->counter);
8010 }
8011
8012 /**
8013 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
8014 }
8015
8016 /**
8017 + * atomic64_set_unchecked - set atomic64 variable
8018 + * @v: pointer to type atomic64_unchecked_t
8019 + * @i: required value
8020 + *
8021 + * Atomically sets the value of @v to @i.
8022 + */
8023 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8024 +{
8025 + v->counter = i;
8026 +}
8027 +
8028 +/**
8029 * atomic64_add - add integer to atomic64 variable
8030 * @i: integer value to add
8031 * @v: pointer to type atomic64_t
8032 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
8033 */
8034 static inline void atomic64_add(long i, atomic64_t *v)
8035 {
8036 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
8037 +
8038 +#ifdef CONFIG_PAX_REFCOUNT
8039 + "jno 0f\n"
8040 + LOCK_PREFIX "subq %1,%0\n"
8041 + "int $4\n0:\n"
8042 + _ASM_EXTABLE(0b, 0b)
8043 +#endif
8044 +
8045 + : "=m" (v->counter)
8046 + : "er" (i), "m" (v->counter));
8047 +}
8048 +
8049 +/**
8050 + * atomic64_add_unchecked - add integer to atomic64 variable
8051 + * @i: integer value to add
8052 + * @v: pointer to type atomic64_unchecked_t
8053 + *
8054 + * Atomically adds @i to @v.
8055 + */
8056 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
8057 +{
8058 asm volatile(LOCK_PREFIX "addq %1,%0"
8059 : "=m" (v->counter)
8060 : "er" (i), "m" (v->counter));
8061 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
8062 */
8063 static inline void atomic64_sub(long i, atomic64_t *v)
8064 {
8065 - asm volatile(LOCK_PREFIX "subq %1,%0"
8066 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
8067 +
8068 +#ifdef CONFIG_PAX_REFCOUNT
8069 + "jno 0f\n"
8070 + LOCK_PREFIX "addq %1,%0\n"
8071 + "int $4\n0:\n"
8072 + _ASM_EXTABLE(0b, 0b)
8073 +#endif
8074 +
8075 + : "=m" (v->counter)
8076 + : "er" (i), "m" (v->counter));
8077 +}
8078 +
8079 +/**
8080 + * atomic64_sub_unchecked - subtract the atomic64 variable
8081 + * @i: integer value to subtract
8082 + * @v: pointer to type atomic64_unchecked_t
8083 + *
8084 + * Atomically subtracts @i from @v.
8085 + */
8086 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
8087 +{
8088 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
8089 : "=m" (v->counter)
8090 : "er" (i), "m" (v->counter));
8091 }
8092 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
8093 {
8094 unsigned char c;
8095
8096 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
8097 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
8098 +
8099 +#ifdef CONFIG_PAX_REFCOUNT
8100 + "jno 0f\n"
8101 + LOCK_PREFIX "addq %2,%0\n"
8102 + "int $4\n0:\n"
8103 + _ASM_EXTABLE(0b, 0b)
8104 +#endif
8105 +
8106 + "sete %1\n"
8107 : "=m" (v->counter), "=qm" (c)
8108 : "er" (i), "m" (v->counter) : "memory");
8109 return c;
8110 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
8111 */
8112 static inline void atomic64_inc(atomic64_t *v)
8113 {
8114 + asm volatile(LOCK_PREFIX "incq %0\n"
8115 +
8116 +#ifdef CONFIG_PAX_REFCOUNT
8117 + "jno 0f\n"
8118 + LOCK_PREFIX "decq %0\n"
8119 + "int $4\n0:\n"
8120 + _ASM_EXTABLE(0b, 0b)
8121 +#endif
8122 +
8123 + : "=m" (v->counter)
8124 + : "m" (v->counter));
8125 +}
8126 +
8127 +/**
8128 + * atomic64_inc_unchecked - increment atomic64 variable
8129 + * @v: pointer to type atomic64_unchecked_t
8130 + *
8131 + * Atomically increments @v by 1.
8132 + */
8133 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8134 +{
8135 asm volatile(LOCK_PREFIX "incq %0"
8136 : "=m" (v->counter)
8137 : "m" (v->counter));
8138 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
8139 */
8140 static inline void atomic64_dec(atomic64_t *v)
8141 {
8142 - asm volatile(LOCK_PREFIX "decq %0"
8143 + asm volatile(LOCK_PREFIX "decq %0\n"
8144 +
8145 +#ifdef CONFIG_PAX_REFCOUNT
8146 + "jno 0f\n"
8147 + LOCK_PREFIX "incq %0\n"
8148 + "int $4\n0:\n"
8149 + _ASM_EXTABLE(0b, 0b)
8150 +#endif
8151 +
8152 + : "=m" (v->counter)
8153 + : "m" (v->counter));
8154 +}
8155 +
8156 +/**
8157 + * atomic64_dec_unchecked - decrement atomic64 variable
8158 + * @v: pointer to type atomic64_t
8159 + *
8160 + * Atomically decrements @v by 1.
8161 + */
8162 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8163 +{
8164 + asm volatile(LOCK_PREFIX "decq %0\n"
8165 : "=m" (v->counter)
8166 : "m" (v->counter));
8167 }
8168 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
8169 {
8170 unsigned char c;
8171
8172 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
8173 + asm volatile(LOCK_PREFIX "decq %0\n"
8174 +
8175 +#ifdef CONFIG_PAX_REFCOUNT
8176 + "jno 0f\n"
8177 + LOCK_PREFIX "incq %0\n"
8178 + "int $4\n0:\n"
8179 + _ASM_EXTABLE(0b, 0b)
8180 +#endif
8181 +
8182 + "sete %1\n"
8183 : "=m" (v->counter), "=qm" (c)
8184 : "m" (v->counter) : "memory");
8185 return c != 0;
8186 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
8187 {
8188 unsigned char c;
8189
8190 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
8191 + asm volatile(LOCK_PREFIX "incq %0\n"
8192 +
8193 +#ifdef CONFIG_PAX_REFCOUNT
8194 + "jno 0f\n"
8195 + LOCK_PREFIX "decq %0\n"
8196 + "int $4\n0:\n"
8197 + _ASM_EXTABLE(0b, 0b)
8198 +#endif
8199 +
8200 + "sete %1\n"
8201 : "=m" (v->counter), "=qm" (c)
8202 : "m" (v->counter) : "memory");
8203 return c != 0;
8204 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
8205 {
8206 unsigned char c;
8207
8208 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
8209 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
8210 +
8211 +#ifdef CONFIG_PAX_REFCOUNT
8212 + "jno 0f\n"
8213 + LOCK_PREFIX "subq %2,%0\n"
8214 + "int $4\n0:\n"
8215 + _ASM_EXTABLE(0b, 0b)
8216 +#endif
8217 +
8218 + "sets %1\n"
8219 : "=m" (v->counter), "=qm" (c)
8220 : "er" (i), "m" (v->counter) : "memory");
8221 return c;
8222 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
8223 */
8224 static inline long atomic64_add_return(long i, atomic64_t *v)
8225 {
8226 + return i + xadd_check_overflow(&v->counter, i);
8227 +}
8228 +
8229 +/**
8230 + * atomic64_add_return_unchecked - add and return
8231 + * @i: integer value to add
8232 + * @v: pointer to type atomic64_unchecked_t
8233 + *
8234 + * Atomically adds @i to @v and returns @i + @v
8235 + */
8236 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8237 +{
8238 return i + xadd(&v->counter, i);
8239 }
8240
8241 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
8242 }
8243
8244 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
8245 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8246 +{
8247 + return atomic64_add_return_unchecked(1, v);
8248 +}
8249 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
8250
8251 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8252 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8253 return cmpxchg(&v->counter, old, new);
8254 }
8255
8256 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8257 +{
8258 + return cmpxchg(&v->counter, old, new);
8259 +}
8260 +
8261 static inline long atomic64_xchg(atomic64_t *v, long new)
8262 {
8263 return xchg(&v->counter, new);
8264 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
8265 */
8266 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8267 {
8268 - long c, old;
8269 + long c, old, new;
8270 c = atomic64_read(v);
8271 for (;;) {
8272 - if (unlikely(c == (u)))
8273 + if (unlikely(c == u))
8274 break;
8275 - old = atomic64_cmpxchg((v), c, c + (a));
8276 +
8277 + asm volatile("add %2,%0\n"
8278 +
8279 +#ifdef CONFIG_PAX_REFCOUNT
8280 + "jno 0f\n"
8281 + "sub %2,%0\n"
8282 + "int $4\n0:\n"
8283 + _ASM_EXTABLE(0b, 0b)
8284 +#endif
8285 +
8286 + : "=r" (new)
8287 + : "0" (c), "ir" (a));
8288 +
8289 + old = atomic64_cmpxchg(v, c, new);
8290 if (likely(old == c))
8291 break;
8292 c = old;
8293 }
8294 - return c != (u);
8295 + return c != u;
8296 }
8297
8298 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8299 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
8300 index 1775d6e..b65017f 100644
8301 --- a/arch/x86/include/asm/bitops.h
8302 +++ b/arch/x86/include/asm/bitops.h
8303 @@ -38,7 +38,7 @@
8304 * a mask operation on a byte.
8305 */
8306 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8307 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8308 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8309 #define CONST_MASK(nr) (1 << ((nr) & 7))
8310
8311 /**
8312 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8313 index 5e1a2ee..c9f9533 100644
8314 --- a/arch/x86/include/asm/boot.h
8315 +++ b/arch/x86/include/asm/boot.h
8316 @@ -11,10 +11,15 @@
8317 #include <asm/pgtable_types.h>
8318
8319 /* Physical address where kernel should be loaded. */
8320 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8321 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8322 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8323 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8324
8325 +#ifndef __ASSEMBLY__
8326 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8327 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8328 +#endif
8329 +
8330 /* Minimum kernel alignment, as a power of two */
8331 #ifdef CONFIG_X86_64
8332 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8333 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8334 index 48f99f1..d78ebf9 100644
8335 --- a/arch/x86/include/asm/cache.h
8336 +++ b/arch/x86/include/asm/cache.h
8337 @@ -5,12 +5,13 @@
8338
8339 /* L1 cache line size */
8340 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8341 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8342 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8343
8344 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8345 +#define __read_only __attribute__((__section__(".data..read_only")))
8346
8347 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8348 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8349 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8350
8351 #ifdef CONFIG_X86_VSMP
8352 #ifdef CONFIG_SMP
8353 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8354 index 4e12668..501d239 100644
8355 --- a/arch/x86/include/asm/cacheflush.h
8356 +++ b/arch/x86/include/asm/cacheflush.h
8357 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8358 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8359
8360 if (pg_flags == _PGMT_DEFAULT)
8361 - return -1;
8362 + return ~0UL;
8363 else if (pg_flags == _PGMT_WC)
8364 return _PAGE_CACHE_WC;
8365 else if (pg_flags == _PGMT_UC_MINUS)
8366 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8367 index 46fc474..b02b0f9 100644
8368 --- a/arch/x86/include/asm/checksum_32.h
8369 +++ b/arch/x86/include/asm/checksum_32.h
8370 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8371 int len, __wsum sum,
8372 int *src_err_ptr, int *dst_err_ptr);
8373
8374 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8375 + int len, __wsum sum,
8376 + int *src_err_ptr, int *dst_err_ptr);
8377 +
8378 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8379 + int len, __wsum sum,
8380 + int *src_err_ptr, int *dst_err_ptr);
8381 +
8382 /*
8383 * Note: when you get a NULL pointer exception here this means someone
8384 * passed in an incorrect kernel address to one of these functions.
8385 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8386 int *err_ptr)
8387 {
8388 might_sleep();
8389 - return csum_partial_copy_generic((__force void *)src, dst,
8390 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8391 len, sum, err_ptr, NULL);
8392 }
8393
8394 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8395 {
8396 might_sleep();
8397 if (access_ok(VERIFY_WRITE, dst, len))
8398 - return csum_partial_copy_generic(src, (__force void *)dst,
8399 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8400 len, sum, NULL, err_ptr);
8401
8402 if (len)
8403 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
8404 index 5d3acdf..6447a02 100644
8405 --- a/arch/x86/include/asm/cmpxchg.h
8406 +++ b/arch/x86/include/asm/cmpxchg.h
8407 @@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
8408 __compiletime_error("Bad argument size for cmpxchg");
8409 extern void __xadd_wrong_size(void)
8410 __compiletime_error("Bad argument size for xadd");
8411 +extern void __xadd_check_overflow_wrong_size(void)
8412 + __compiletime_error("Bad argument size for xadd_check_overflow");
8413
8414 /*
8415 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
8416 @@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
8417 __ret; \
8418 })
8419
8420 +#define __xadd_check_overflow(ptr, inc, lock) \
8421 + ({ \
8422 + __typeof__ (*(ptr)) __ret = (inc); \
8423 + switch (sizeof(*(ptr))) { \
8424 + case __X86_CASE_L: \
8425 + asm volatile (lock "xaddl %0, %1\n" \
8426 + "jno 0f\n" \
8427 + "mov %0,%1\n" \
8428 + "int $4\n0:\n" \
8429 + _ASM_EXTABLE(0b, 0b) \
8430 + : "+r" (__ret), "+m" (*(ptr)) \
8431 + : : "memory", "cc"); \
8432 + break; \
8433 + case __X86_CASE_Q: \
8434 + asm volatile (lock "xaddq %q0, %1\n" \
8435 + "jno 0f\n" \
8436 + "mov %0,%1\n" \
8437 + "int $4\n0:\n" \
8438 + _ASM_EXTABLE(0b, 0b) \
8439 + : "+r" (__ret), "+m" (*(ptr)) \
8440 + : : "memory", "cc"); \
8441 + break; \
8442 + default: \
8443 + __xadd_check_overflow_wrong_size(); \
8444 + } \
8445 + __ret; \
8446 + })
8447 +
8448 /*
8449 * xadd() adds "inc" to "*ptr" and atomically returns the previous
8450 * value of "*ptr".
8451 @@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
8452 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
8453 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
8454
8455 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
8456 +
8457 #endif /* ASM_X86_CMPXCHG_H */
8458 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8459 index f3444f7..051a196 100644
8460 --- a/arch/x86/include/asm/cpufeature.h
8461 +++ b/arch/x86/include/asm/cpufeature.h
8462 @@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8463 ".section .discard,\"aw\",@progbits\n"
8464 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8465 ".previous\n"
8466 - ".section .altinstr_replacement,\"ax\"\n"
8467 + ".section .altinstr_replacement,\"a\"\n"
8468 "3: movb $1,%0\n"
8469 "4:\n"
8470 ".previous\n"
8471 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8472 index 41935fa..3b40db8 100644
8473 --- a/arch/x86/include/asm/desc.h
8474 +++ b/arch/x86/include/asm/desc.h
8475 @@ -4,6 +4,7 @@
8476 #include <asm/desc_defs.h>
8477 #include <asm/ldt.h>
8478 #include <asm/mmu.h>
8479 +#include <asm/pgtable.h>
8480
8481 #include <linux/smp.h>
8482
8483 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8484
8485 desc->type = (info->read_exec_only ^ 1) << 1;
8486 desc->type |= info->contents << 2;
8487 + desc->type |= info->seg_not_present ^ 1;
8488
8489 desc->s = 1;
8490 desc->dpl = 0x3;
8491 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8492 }
8493
8494 extern struct desc_ptr idt_descr;
8495 -extern gate_desc idt_table[];
8496 -
8497 -struct gdt_page {
8498 - struct desc_struct gdt[GDT_ENTRIES];
8499 -} __attribute__((aligned(PAGE_SIZE)));
8500 -
8501 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8502 +extern gate_desc idt_table[256];
8503
8504 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8505 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8506 {
8507 - return per_cpu(gdt_page, cpu).gdt;
8508 + return cpu_gdt_table[cpu];
8509 }
8510
8511 #ifdef CONFIG_X86_64
8512 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8513 unsigned long base, unsigned dpl, unsigned flags,
8514 unsigned short seg)
8515 {
8516 - gate->a = (seg << 16) | (base & 0xffff);
8517 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8518 + gate->gate.offset_low = base;
8519 + gate->gate.seg = seg;
8520 + gate->gate.reserved = 0;
8521 + gate->gate.type = type;
8522 + gate->gate.s = 0;
8523 + gate->gate.dpl = dpl;
8524 + gate->gate.p = 1;
8525 + gate->gate.offset_high = base >> 16;
8526 }
8527
8528 #endif
8529 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8530
8531 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8532 {
8533 + pax_open_kernel();
8534 memcpy(&idt[entry], gate, sizeof(*gate));
8535 + pax_close_kernel();
8536 }
8537
8538 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8539 {
8540 + pax_open_kernel();
8541 memcpy(&ldt[entry], desc, 8);
8542 + pax_close_kernel();
8543 }
8544
8545 static inline void
8546 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8547 default: size = sizeof(*gdt); break;
8548 }
8549
8550 + pax_open_kernel();
8551 memcpy(&gdt[entry], desc, size);
8552 + pax_close_kernel();
8553 }
8554
8555 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8556 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8557
8558 static inline void native_load_tr_desc(void)
8559 {
8560 + pax_open_kernel();
8561 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8562 + pax_close_kernel();
8563 }
8564
8565 static inline void native_load_gdt(const struct desc_ptr *dtr)
8566 @@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8567 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8568 unsigned int i;
8569
8570 + pax_open_kernel();
8571 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8572 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8573 + pax_close_kernel();
8574 }
8575
8576 #define _LDT_empty(info) \
8577 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8578 desc->limit = (limit >> 16) & 0xf;
8579 }
8580
8581 -static inline void _set_gate(int gate, unsigned type, void *addr,
8582 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8583 unsigned dpl, unsigned ist, unsigned seg)
8584 {
8585 gate_desc s;
8586 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8587 * Pentium F0 0F bugfix can have resulted in the mapped
8588 * IDT being write-protected.
8589 */
8590 -static inline void set_intr_gate(unsigned int n, void *addr)
8591 +static inline void set_intr_gate(unsigned int n, const void *addr)
8592 {
8593 BUG_ON((unsigned)n > 0xFF);
8594 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8595 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8596 /*
8597 * This routine sets up an interrupt gate at directory privilege level 3.
8598 */
8599 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8600 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8601 {
8602 BUG_ON((unsigned)n > 0xFF);
8603 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8604 }
8605
8606 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8607 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8608 {
8609 BUG_ON((unsigned)n > 0xFF);
8610 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8611 }
8612
8613 -static inline void set_trap_gate(unsigned int n, void *addr)
8614 +static inline void set_trap_gate(unsigned int n, const void *addr)
8615 {
8616 BUG_ON((unsigned)n > 0xFF);
8617 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8618 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8619 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8620 {
8621 BUG_ON((unsigned)n > 0xFF);
8622 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8623 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8624 }
8625
8626 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8627 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8628 {
8629 BUG_ON((unsigned)n > 0xFF);
8630 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8631 }
8632
8633 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8634 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8635 {
8636 BUG_ON((unsigned)n > 0xFF);
8637 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8638 }
8639
8640 +#ifdef CONFIG_X86_32
8641 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8642 +{
8643 + struct desc_struct d;
8644 +
8645 + if (likely(limit))
8646 + limit = (limit - 1UL) >> PAGE_SHIFT;
8647 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8648 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8649 +}
8650 +#endif
8651 +
8652 #endif /* _ASM_X86_DESC_H */
8653 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8654 index 278441f..b95a174 100644
8655 --- a/arch/x86/include/asm/desc_defs.h
8656 +++ b/arch/x86/include/asm/desc_defs.h
8657 @@ -31,6 +31,12 @@ struct desc_struct {
8658 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8659 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8660 };
8661 + struct {
8662 + u16 offset_low;
8663 + u16 seg;
8664 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8665 + unsigned offset_high: 16;
8666 + } gate;
8667 };
8668 } __attribute__((packed));
8669
8670 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8671 index 908b969..a1f4eb4 100644
8672 --- a/arch/x86/include/asm/e820.h
8673 +++ b/arch/x86/include/asm/e820.h
8674 @@ -69,7 +69,7 @@ struct e820map {
8675 #define ISA_START_ADDRESS 0xa0000
8676 #define ISA_END_ADDRESS 0x100000
8677
8678 -#define BIOS_BEGIN 0x000a0000
8679 +#define BIOS_BEGIN 0x000c0000
8680 #define BIOS_END 0x00100000
8681
8682 #define BIOS_ROM_BASE 0xffe00000
8683 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8684 index 5f962df..7289f09 100644
8685 --- a/arch/x86/include/asm/elf.h
8686 +++ b/arch/x86/include/asm/elf.h
8687 @@ -238,7 +238,25 @@ extern int force_personality32;
8688 the loader. We need to make sure that it is out of the way of the program
8689 that it will "exec", and that there is sufficient room for the brk. */
8690
8691 +#ifdef CONFIG_PAX_SEGMEXEC
8692 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8693 +#else
8694 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8695 +#endif
8696 +
8697 +#ifdef CONFIG_PAX_ASLR
8698 +#ifdef CONFIG_X86_32
8699 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8700 +
8701 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8702 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8703 +#else
8704 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8705 +
8706 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8707 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8708 +#endif
8709 +#endif
8710
8711 /* This yields a mask that user programs can use to figure out what
8712 instruction set this CPU supports. This could be done in user space,
8713 @@ -291,9 +309,7 @@ do { \
8714
8715 #define ARCH_DLINFO \
8716 do { \
8717 - if (vdso_enabled) \
8718 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8719 - (unsigned long)current->mm->context.vdso); \
8720 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8721 } while (0)
8722
8723 #define AT_SYSINFO 32
8724 @@ -304,7 +320,7 @@ do { \
8725
8726 #endif /* !CONFIG_X86_32 */
8727
8728 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8729 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8730
8731 #define VDSO_ENTRY \
8732 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8733 @@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8734 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8735 #define compat_arch_setup_additional_pages syscall32_setup_pages
8736
8737 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8738 -#define arch_randomize_brk arch_randomize_brk
8739 -
8740 /*
8741 * True on X86_32 or when emulating IA32 on X86_64
8742 */
8743 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8744 index cc70c1c..d96d011 100644
8745 --- a/arch/x86/include/asm/emergency-restart.h
8746 +++ b/arch/x86/include/asm/emergency-restart.h
8747 @@ -15,6 +15,6 @@ enum reboot_type {
8748
8749 extern enum reboot_type reboot_type;
8750
8751 -extern void machine_emergency_restart(void);
8752 +extern void machine_emergency_restart(void) __noreturn;
8753
8754 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8755 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8756 index d09bb03..4ea4194 100644
8757 --- a/arch/x86/include/asm/futex.h
8758 +++ b/arch/x86/include/asm/futex.h
8759 @@ -12,16 +12,18 @@
8760 #include <asm/system.h>
8761
8762 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8763 + typecheck(u32 __user *, uaddr); \
8764 asm volatile("1:\t" insn "\n" \
8765 "2:\t.section .fixup,\"ax\"\n" \
8766 "3:\tmov\t%3, %1\n" \
8767 "\tjmp\t2b\n" \
8768 "\t.previous\n" \
8769 _ASM_EXTABLE(1b, 3b) \
8770 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8771 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8772 : "i" (-EFAULT), "0" (oparg), "1" (0))
8773
8774 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8775 + typecheck(u32 __user *, uaddr); \
8776 asm volatile("1:\tmovl %2, %0\n" \
8777 "\tmovl\t%0, %3\n" \
8778 "\t" insn "\n" \
8779 @@ -34,7 +36,7 @@
8780 _ASM_EXTABLE(1b, 4b) \
8781 _ASM_EXTABLE(2b, 4b) \
8782 : "=&a" (oldval), "=&r" (ret), \
8783 - "+m" (*uaddr), "=&r" (tem) \
8784 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8785 : "r" (oparg), "i" (-EFAULT), "1" (0))
8786
8787 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8788 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8789
8790 switch (op) {
8791 case FUTEX_OP_SET:
8792 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8793 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8794 break;
8795 case FUTEX_OP_ADD:
8796 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8797 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8798 uaddr, oparg);
8799 break;
8800 case FUTEX_OP_OR:
8801 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8802 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8803 return -EFAULT;
8804
8805 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8806 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8807 "2:\t.section .fixup, \"ax\"\n"
8808 "3:\tmov %3, %0\n"
8809 "\tjmp 2b\n"
8810 "\t.previous\n"
8811 _ASM_EXTABLE(1b, 3b)
8812 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8813 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8814 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8815 : "memory"
8816 );
8817 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8818 index eb92a6e..b98b2f4 100644
8819 --- a/arch/x86/include/asm/hw_irq.h
8820 +++ b/arch/x86/include/asm/hw_irq.h
8821 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8822 extern void enable_IO_APIC(void);
8823
8824 /* Statistics */
8825 -extern atomic_t irq_err_count;
8826 -extern atomic_t irq_mis_count;
8827 +extern atomic_unchecked_t irq_err_count;
8828 +extern atomic_unchecked_t irq_mis_count;
8829
8830 /* EISA */
8831 extern void eisa_set_level_irq(unsigned int irq);
8832 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8833 index c9e09ea..73888df 100644
8834 --- a/arch/x86/include/asm/i387.h
8835 +++ b/arch/x86/include/asm/i387.h
8836 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8837 {
8838 int err;
8839
8840 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8841 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8842 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8843 +#endif
8844 +
8845 /* See comment in fxsave() below. */
8846 #ifdef CONFIG_AS_FXSAVEQ
8847 asm volatile("1: fxrstorq %[fx]\n\t"
8848 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8849 {
8850 int err;
8851
8852 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8853 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8854 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8855 +#endif
8856 +
8857 /*
8858 * Clear the bytes not touched by the fxsave and reserved
8859 * for the SW usage.
8860 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
8861 #endif /* CONFIG_X86_64 */
8862
8863 /* We need a safe address that is cheap to find and that is already
8864 - in L1 during context switch. The best choices are unfortunately
8865 - different for UP and SMP */
8866 -#ifdef CONFIG_SMP
8867 -#define safe_address (__per_cpu_offset[0])
8868 -#else
8869 -#define safe_address (kstat_cpu(0).cpustat.user)
8870 -#endif
8871 + in L1 during context switch. */
8872 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8873
8874 /*
8875 * These must be called with preempt disabled
8876 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
8877 struct thread_info *me = current_thread_info();
8878 preempt_disable();
8879 if (me->status & TS_USEDFPU)
8880 - __save_init_fpu(me->task);
8881 + __save_init_fpu(current);
8882 else
8883 clts();
8884 }
8885 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8886 index d8e8eef..99f81ae 100644
8887 --- a/arch/x86/include/asm/io.h
8888 +++ b/arch/x86/include/asm/io.h
8889 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8890
8891 #include <linux/vmalloc.h>
8892
8893 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8894 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8895 +{
8896 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8897 +}
8898 +
8899 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8900 +{
8901 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8902 +}
8903 +
8904 /*
8905 * Convert a virtual cached pointer to an uncached pointer
8906 */
8907 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8908 index bba3cf8..06bc8da 100644
8909 --- a/arch/x86/include/asm/irqflags.h
8910 +++ b/arch/x86/include/asm/irqflags.h
8911 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8912 sti; \
8913 sysexit
8914
8915 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8916 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8917 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8918 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8919 +
8920 #else
8921 #define INTERRUPT_RETURN iret
8922 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8923 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8924 index 5478825..839e88c 100644
8925 --- a/arch/x86/include/asm/kprobes.h
8926 +++ b/arch/x86/include/asm/kprobes.h
8927 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8928 #define RELATIVEJUMP_SIZE 5
8929 #define RELATIVECALL_OPCODE 0xe8
8930 #define RELATIVE_ADDR_SIZE 4
8931 -#define MAX_STACK_SIZE 64
8932 -#define MIN_STACK_SIZE(ADDR) \
8933 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8934 - THREAD_SIZE - (unsigned long)(ADDR))) \
8935 - ? (MAX_STACK_SIZE) \
8936 - : (((unsigned long)current_thread_info()) + \
8937 - THREAD_SIZE - (unsigned long)(ADDR)))
8938 +#define MAX_STACK_SIZE 64UL
8939 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8940
8941 #define flush_insn_slot(p) do { } while (0)
8942
8943 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8944 index b4973f4..7c4d3fc 100644
8945 --- a/arch/x86/include/asm/kvm_host.h
8946 +++ b/arch/x86/include/asm/kvm_host.h
8947 @@ -459,7 +459,7 @@ struct kvm_arch {
8948 unsigned int n_requested_mmu_pages;
8949 unsigned int n_max_mmu_pages;
8950 unsigned int indirect_shadow_pages;
8951 - atomic_t invlpg_counter;
8952 + atomic_unchecked_t invlpg_counter;
8953 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8954 /*
8955 * Hash table of struct kvm_mmu_page.
8956 @@ -638,7 +638,7 @@ struct kvm_x86_ops {
8957 int (*check_intercept)(struct kvm_vcpu *vcpu,
8958 struct x86_instruction_info *info,
8959 enum x86_intercept_stage stage);
8960 -};
8961 +} __do_const;
8962
8963 struct kvm_arch_async_pf {
8964 u32 token;
8965 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8966 index 9cdae5d..300d20f 100644
8967 --- a/arch/x86/include/asm/local.h
8968 +++ b/arch/x86/include/asm/local.h
8969 @@ -18,26 +18,58 @@ typedef struct {
8970
8971 static inline void local_inc(local_t *l)
8972 {
8973 - asm volatile(_ASM_INC "%0"
8974 + asm volatile(_ASM_INC "%0\n"
8975 +
8976 +#ifdef CONFIG_PAX_REFCOUNT
8977 + "jno 0f\n"
8978 + _ASM_DEC "%0\n"
8979 + "int $4\n0:\n"
8980 + _ASM_EXTABLE(0b, 0b)
8981 +#endif
8982 +
8983 : "+m" (l->a.counter));
8984 }
8985
8986 static inline void local_dec(local_t *l)
8987 {
8988 - asm volatile(_ASM_DEC "%0"
8989 + asm volatile(_ASM_DEC "%0\n"
8990 +
8991 +#ifdef CONFIG_PAX_REFCOUNT
8992 + "jno 0f\n"
8993 + _ASM_INC "%0\n"
8994 + "int $4\n0:\n"
8995 + _ASM_EXTABLE(0b, 0b)
8996 +#endif
8997 +
8998 : "+m" (l->a.counter));
8999 }
9000
9001 static inline void local_add(long i, local_t *l)
9002 {
9003 - asm volatile(_ASM_ADD "%1,%0"
9004 + asm volatile(_ASM_ADD "%1,%0\n"
9005 +
9006 +#ifdef CONFIG_PAX_REFCOUNT
9007 + "jno 0f\n"
9008 + _ASM_SUB "%1,%0\n"
9009 + "int $4\n0:\n"
9010 + _ASM_EXTABLE(0b, 0b)
9011 +#endif
9012 +
9013 : "+m" (l->a.counter)
9014 : "ir" (i));
9015 }
9016
9017 static inline void local_sub(long i, local_t *l)
9018 {
9019 - asm volatile(_ASM_SUB "%1,%0"
9020 + asm volatile(_ASM_SUB "%1,%0\n"
9021 +
9022 +#ifdef CONFIG_PAX_REFCOUNT
9023 + "jno 0f\n"
9024 + _ASM_ADD "%1,%0\n"
9025 + "int $4\n0:\n"
9026 + _ASM_EXTABLE(0b, 0b)
9027 +#endif
9028 +
9029 : "+m" (l->a.counter)
9030 : "ir" (i));
9031 }
9032 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
9033 {
9034 unsigned char c;
9035
9036 - asm volatile(_ASM_SUB "%2,%0; sete %1"
9037 + asm volatile(_ASM_SUB "%2,%0\n"
9038 +
9039 +#ifdef CONFIG_PAX_REFCOUNT
9040 + "jno 0f\n"
9041 + _ASM_ADD "%2,%0\n"
9042 + "int $4\n0:\n"
9043 + _ASM_EXTABLE(0b, 0b)
9044 +#endif
9045 +
9046 + "sete %1\n"
9047 : "+m" (l->a.counter), "=qm" (c)
9048 : "ir" (i) : "memory");
9049 return c;
9050 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
9051 {
9052 unsigned char c;
9053
9054 - asm volatile(_ASM_DEC "%0; sete %1"
9055 + asm volatile(_ASM_DEC "%0\n"
9056 +
9057 +#ifdef CONFIG_PAX_REFCOUNT
9058 + "jno 0f\n"
9059 + _ASM_INC "%0\n"
9060 + "int $4\n0:\n"
9061 + _ASM_EXTABLE(0b, 0b)
9062 +#endif
9063 +
9064 + "sete %1\n"
9065 : "+m" (l->a.counter), "=qm" (c)
9066 : : "memory");
9067 return c != 0;
9068 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
9069 {
9070 unsigned char c;
9071
9072 - asm volatile(_ASM_INC "%0; sete %1"
9073 + asm volatile(_ASM_INC "%0\n"
9074 +
9075 +#ifdef CONFIG_PAX_REFCOUNT
9076 + "jno 0f\n"
9077 + _ASM_DEC "%0\n"
9078 + "int $4\n0:\n"
9079 + _ASM_EXTABLE(0b, 0b)
9080 +#endif
9081 +
9082 + "sete %1\n"
9083 : "+m" (l->a.counter), "=qm" (c)
9084 : : "memory");
9085 return c != 0;
9086 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
9087 {
9088 unsigned char c;
9089
9090 - asm volatile(_ASM_ADD "%2,%0; sets %1"
9091 + asm volatile(_ASM_ADD "%2,%0\n"
9092 +
9093 +#ifdef CONFIG_PAX_REFCOUNT
9094 + "jno 0f\n"
9095 + _ASM_SUB "%2,%0\n"
9096 + "int $4\n0:\n"
9097 + _ASM_EXTABLE(0b, 0b)
9098 +#endif
9099 +
9100 + "sets %1\n"
9101 : "+m" (l->a.counter), "=qm" (c)
9102 : "ir" (i) : "memory");
9103 return c;
9104 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
9105 #endif
9106 /* Modern 486+ processor */
9107 __i = i;
9108 - asm volatile(_ASM_XADD "%0, %1;"
9109 + asm volatile(_ASM_XADD "%0, %1\n"
9110 +
9111 +#ifdef CONFIG_PAX_REFCOUNT
9112 + "jno 0f\n"
9113 + _ASM_MOV "%0,%1\n"
9114 + "int $4\n0:\n"
9115 + _ASM_EXTABLE(0b, 0b)
9116 +#endif
9117 +
9118 : "+r" (i), "+m" (l->a.counter)
9119 : : "memory");
9120 return i + __i;
9121 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
9122 index 593e51d..fa69c9a 100644
9123 --- a/arch/x86/include/asm/mman.h
9124 +++ b/arch/x86/include/asm/mman.h
9125 @@ -5,4 +5,14 @@
9126
9127 #include <asm-generic/mman.h>
9128
9129 +#ifdef __KERNEL__
9130 +#ifndef __ASSEMBLY__
9131 +#ifdef CONFIG_X86_32
9132 +#define arch_mmap_check i386_mmap_check
9133 +int i386_mmap_check(unsigned long addr, unsigned long len,
9134 + unsigned long flags);
9135 +#endif
9136 +#endif
9137 +#endif
9138 +
9139 #endif /* _ASM_X86_MMAN_H */
9140 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
9141 index 5f55e69..e20bfb1 100644
9142 --- a/arch/x86/include/asm/mmu.h
9143 +++ b/arch/x86/include/asm/mmu.h
9144 @@ -9,7 +9,7 @@
9145 * we put the segment information here.
9146 */
9147 typedef struct {
9148 - void *ldt;
9149 + struct desc_struct *ldt;
9150 int size;
9151
9152 #ifdef CONFIG_X86_64
9153 @@ -18,7 +18,19 @@ typedef struct {
9154 #endif
9155
9156 struct mutex lock;
9157 - void *vdso;
9158 + unsigned long vdso;
9159 +
9160 +#ifdef CONFIG_X86_32
9161 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9162 + unsigned long user_cs_base;
9163 + unsigned long user_cs_limit;
9164 +
9165 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9166 + cpumask_t cpu_user_cs_mask;
9167 +#endif
9168 +
9169 +#endif
9170 +#endif
9171 } mm_context_t;
9172
9173 #ifdef CONFIG_SMP
9174 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
9175 index 6902152..399f3a2 100644
9176 --- a/arch/x86/include/asm/mmu_context.h
9177 +++ b/arch/x86/include/asm/mmu_context.h
9178 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
9179
9180 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9181 {
9182 +
9183 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9184 + unsigned int i;
9185 + pgd_t *pgd;
9186 +
9187 + pax_open_kernel();
9188 + pgd = get_cpu_pgd(smp_processor_id());
9189 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9190 + set_pgd_batched(pgd+i, native_make_pgd(0));
9191 + pax_close_kernel();
9192 +#endif
9193 +
9194 #ifdef CONFIG_SMP
9195 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9196 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9197 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9198 struct task_struct *tsk)
9199 {
9200 unsigned cpu = smp_processor_id();
9201 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9202 + int tlbstate = TLBSTATE_OK;
9203 +#endif
9204
9205 if (likely(prev != next)) {
9206 #ifdef CONFIG_SMP
9207 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9208 + tlbstate = percpu_read(cpu_tlbstate.state);
9209 +#endif
9210 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9211 percpu_write(cpu_tlbstate.active_mm, next);
9212 #endif
9213 cpumask_set_cpu(cpu, mm_cpumask(next));
9214
9215 /* Re-load page tables */
9216 +#ifdef CONFIG_PAX_PER_CPU_PGD
9217 + pax_open_kernel();
9218 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9219 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9220 + pax_close_kernel();
9221 + load_cr3(get_cpu_pgd(cpu));
9222 +#else
9223 load_cr3(next->pgd);
9224 +#endif
9225
9226 /* stop flush ipis for the previous mm */
9227 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9228 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9229 */
9230 if (unlikely(prev->context.ldt != next->context.ldt))
9231 load_LDT_nolock(&next->context);
9232 - }
9233 +
9234 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9235 + if (!(__supported_pte_mask & _PAGE_NX)) {
9236 + smp_mb__before_clear_bit();
9237 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9238 + smp_mb__after_clear_bit();
9239 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9240 + }
9241 +#endif
9242 +
9243 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9244 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9245 + prev->context.user_cs_limit != next->context.user_cs_limit))
9246 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9247 #ifdef CONFIG_SMP
9248 + else if (unlikely(tlbstate != TLBSTATE_OK))
9249 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9250 +#endif
9251 +#endif
9252 +
9253 + }
9254 else {
9255 +
9256 +#ifdef CONFIG_PAX_PER_CPU_PGD
9257 + pax_open_kernel();
9258 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9259 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9260 + pax_close_kernel();
9261 + load_cr3(get_cpu_pgd(cpu));
9262 +#endif
9263 +
9264 +#ifdef CONFIG_SMP
9265 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9266 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9267
9268 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9269 * tlb flush IPI delivery. We must reload CR3
9270 * to make sure to use no freed page tables.
9271 */
9272 +
9273 +#ifndef CONFIG_PAX_PER_CPU_PGD
9274 load_cr3(next->pgd);
9275 +#endif
9276 +
9277 load_LDT_nolock(&next->context);
9278 +
9279 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9280 + if (!(__supported_pte_mask & _PAGE_NX))
9281 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9282 +#endif
9283 +
9284 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9285 +#ifdef CONFIG_PAX_PAGEEXEC
9286 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
9287 +#endif
9288 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9289 +#endif
9290 +
9291 }
9292 +#endif
9293 }
9294 -#endif
9295 }
9296
9297 #define activate_mm(prev, next) \
9298 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
9299 index 9eae775..c914fea 100644
9300 --- a/arch/x86/include/asm/module.h
9301 +++ b/arch/x86/include/asm/module.h
9302 @@ -5,6 +5,7 @@
9303
9304 #ifdef CONFIG_X86_64
9305 /* X86_64 does not define MODULE_PROC_FAMILY */
9306 +#define MODULE_PROC_FAMILY ""
9307 #elif defined CONFIG_M386
9308 #define MODULE_PROC_FAMILY "386 "
9309 #elif defined CONFIG_M486
9310 @@ -59,8 +60,20 @@
9311 #error unknown processor family
9312 #endif
9313
9314 -#ifdef CONFIG_X86_32
9315 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
9316 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9317 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
9318 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
9319 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
9320 +#else
9321 +#define MODULE_PAX_KERNEXEC ""
9322 #endif
9323
9324 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9325 +#define MODULE_PAX_UDEREF "UDEREF "
9326 +#else
9327 +#define MODULE_PAX_UDEREF ""
9328 +#endif
9329 +
9330 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
9331 +
9332 #endif /* _ASM_X86_MODULE_H */
9333 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
9334 index 7639dbf..e08a58c 100644
9335 --- a/arch/x86/include/asm/page_64_types.h
9336 +++ b/arch/x86/include/asm/page_64_types.h
9337 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9338
9339 /* duplicated to the one in bootmem.h */
9340 extern unsigned long max_pfn;
9341 -extern unsigned long phys_base;
9342 +extern const unsigned long phys_base;
9343
9344 extern unsigned long __phys_addr(unsigned long);
9345 #define __phys_reloc_hide(x) (x)
9346 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
9347 index a7d2db9..edb023e 100644
9348 --- a/arch/x86/include/asm/paravirt.h
9349 +++ b/arch/x86/include/asm/paravirt.h
9350 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
9351 val);
9352 }
9353
9354 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9355 +{
9356 + pgdval_t val = native_pgd_val(pgd);
9357 +
9358 + if (sizeof(pgdval_t) > sizeof(long))
9359 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9360 + val, (u64)val >> 32);
9361 + else
9362 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9363 + val);
9364 +}
9365 +
9366 static inline void pgd_clear(pgd_t *pgdp)
9367 {
9368 set_pgd(pgdp, __pgd(0));
9369 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9370 pv_mmu_ops.set_fixmap(idx, phys, flags);
9371 }
9372
9373 +#ifdef CONFIG_PAX_KERNEXEC
9374 +static inline unsigned long pax_open_kernel(void)
9375 +{
9376 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9377 +}
9378 +
9379 +static inline unsigned long pax_close_kernel(void)
9380 +{
9381 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9382 +}
9383 +#else
9384 +static inline unsigned long pax_open_kernel(void) { return 0; }
9385 +static inline unsigned long pax_close_kernel(void) { return 0; }
9386 +#endif
9387 +
9388 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9389
9390 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9391 @@ -964,7 +991,7 @@ extern void default_banner(void);
9392
9393 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9394 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9395 -#define PARA_INDIRECT(addr) *%cs:addr
9396 +#define PARA_INDIRECT(addr) *%ss:addr
9397 #endif
9398
9399 #define INTERRUPT_RETURN \
9400 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
9401 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9402 CLBR_NONE, \
9403 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9404 +
9405 +#define GET_CR0_INTO_RDI \
9406 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9407 + mov %rax,%rdi
9408 +
9409 +#define SET_RDI_INTO_CR0 \
9410 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9411 +
9412 +#define GET_CR3_INTO_RDI \
9413 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9414 + mov %rax,%rdi
9415 +
9416 +#define SET_RDI_INTO_CR3 \
9417 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9418 +
9419 #endif /* CONFIG_X86_32 */
9420
9421 #endif /* __ASSEMBLY__ */
9422 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9423 index 8e8b9a4..f07d725 100644
9424 --- a/arch/x86/include/asm/paravirt_types.h
9425 +++ b/arch/x86/include/asm/paravirt_types.h
9426 @@ -84,20 +84,20 @@ struct pv_init_ops {
9427 */
9428 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9429 unsigned long addr, unsigned len);
9430 -};
9431 +} __no_const;
9432
9433
9434 struct pv_lazy_ops {
9435 /* Set deferred update mode, used for batching operations. */
9436 void (*enter)(void);
9437 void (*leave)(void);
9438 -};
9439 +} __no_const;
9440
9441 struct pv_time_ops {
9442 unsigned long long (*sched_clock)(void);
9443 unsigned long long (*steal_clock)(int cpu);
9444 unsigned long (*get_tsc_khz)(void);
9445 -};
9446 +} __no_const;
9447
9448 struct pv_cpu_ops {
9449 /* hooks for various privileged instructions */
9450 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
9451
9452 void (*start_context_switch)(struct task_struct *prev);
9453 void (*end_context_switch)(struct task_struct *next);
9454 -};
9455 +} __no_const;
9456
9457 struct pv_irq_ops {
9458 /*
9459 @@ -224,7 +224,7 @@ struct pv_apic_ops {
9460 unsigned long start_eip,
9461 unsigned long start_esp);
9462 #endif
9463 -};
9464 +} __no_const;
9465
9466 struct pv_mmu_ops {
9467 unsigned long (*read_cr2)(void);
9468 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
9469 struct paravirt_callee_save make_pud;
9470
9471 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9472 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9473 #endif /* PAGETABLE_LEVELS == 4 */
9474 #endif /* PAGETABLE_LEVELS >= 3 */
9475
9476 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
9477 an mfn. We can tell which is which from the index. */
9478 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9479 phys_addr_t phys, pgprot_t flags);
9480 +
9481 +#ifdef CONFIG_PAX_KERNEXEC
9482 + unsigned long (*pax_open_kernel)(void);
9483 + unsigned long (*pax_close_kernel)(void);
9484 +#endif
9485 +
9486 };
9487
9488 struct arch_spinlock;
9489 @@ -334,7 +341,7 @@ struct pv_lock_ops {
9490 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9491 int (*spin_trylock)(struct arch_spinlock *lock);
9492 void (*spin_unlock)(struct arch_spinlock *lock);
9493 -};
9494 +} __no_const;
9495
9496 /* This contains all the paravirt structures: we get a convenient
9497 * number for each function using the offset which we use to indicate
9498 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9499 index b4389a4..b7ff22c 100644
9500 --- a/arch/x86/include/asm/pgalloc.h
9501 +++ b/arch/x86/include/asm/pgalloc.h
9502 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9503 pmd_t *pmd, pte_t *pte)
9504 {
9505 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9506 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9507 +}
9508 +
9509 +static inline void pmd_populate_user(struct mm_struct *mm,
9510 + pmd_t *pmd, pte_t *pte)
9511 +{
9512 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9513 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9514 }
9515
9516 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9517 index 98391db..8f6984e 100644
9518 --- a/arch/x86/include/asm/pgtable-2level.h
9519 +++ b/arch/x86/include/asm/pgtable-2level.h
9520 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9521
9522 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9523 {
9524 + pax_open_kernel();
9525 *pmdp = pmd;
9526 + pax_close_kernel();
9527 }
9528
9529 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9530 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9531 index effff47..f9e4035 100644
9532 --- a/arch/x86/include/asm/pgtable-3level.h
9533 +++ b/arch/x86/include/asm/pgtable-3level.h
9534 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9535
9536 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9537 {
9538 + pax_open_kernel();
9539 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9540 + pax_close_kernel();
9541 }
9542
9543 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9544 {
9545 + pax_open_kernel();
9546 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9547 + pax_close_kernel();
9548 }
9549
9550 /*
9551 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9552 index 18601c8..3d716d1 100644
9553 --- a/arch/x86/include/asm/pgtable.h
9554 +++ b/arch/x86/include/asm/pgtable.h
9555 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9556
9557 #ifndef __PAGETABLE_PUD_FOLDED
9558 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9559 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9560 #define pgd_clear(pgd) native_pgd_clear(pgd)
9561 #endif
9562
9563 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9564
9565 #define arch_end_context_switch(prev) do {} while(0)
9566
9567 +#define pax_open_kernel() native_pax_open_kernel()
9568 +#define pax_close_kernel() native_pax_close_kernel()
9569 #endif /* CONFIG_PARAVIRT */
9570
9571 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9572 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9573 +
9574 +#ifdef CONFIG_PAX_KERNEXEC
9575 +static inline unsigned long native_pax_open_kernel(void)
9576 +{
9577 + unsigned long cr0;
9578 +
9579 + preempt_disable();
9580 + barrier();
9581 + cr0 = read_cr0() ^ X86_CR0_WP;
9582 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9583 + write_cr0(cr0);
9584 + return cr0 ^ X86_CR0_WP;
9585 +}
9586 +
9587 +static inline unsigned long native_pax_close_kernel(void)
9588 +{
9589 + unsigned long cr0;
9590 +
9591 + cr0 = read_cr0() ^ X86_CR0_WP;
9592 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9593 + write_cr0(cr0);
9594 + barrier();
9595 + preempt_enable_no_resched();
9596 + return cr0 ^ X86_CR0_WP;
9597 +}
9598 +#else
9599 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9600 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9601 +#endif
9602 +
9603 /*
9604 * The following only work if pte_present() is true.
9605 * Undefined behaviour if not..
9606 */
9607 +static inline int pte_user(pte_t pte)
9608 +{
9609 + return pte_val(pte) & _PAGE_USER;
9610 +}
9611 +
9612 static inline int pte_dirty(pte_t pte)
9613 {
9614 return pte_flags(pte) & _PAGE_DIRTY;
9615 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9616 return pte_clear_flags(pte, _PAGE_RW);
9617 }
9618
9619 +static inline pte_t pte_mkread(pte_t pte)
9620 +{
9621 + return __pte(pte_val(pte) | _PAGE_USER);
9622 +}
9623 +
9624 static inline pte_t pte_mkexec(pte_t pte)
9625 {
9626 - return pte_clear_flags(pte, _PAGE_NX);
9627 +#ifdef CONFIG_X86_PAE
9628 + if (__supported_pte_mask & _PAGE_NX)
9629 + return pte_clear_flags(pte, _PAGE_NX);
9630 + else
9631 +#endif
9632 + return pte_set_flags(pte, _PAGE_USER);
9633 +}
9634 +
9635 +static inline pte_t pte_exprotect(pte_t pte)
9636 +{
9637 +#ifdef CONFIG_X86_PAE
9638 + if (__supported_pte_mask & _PAGE_NX)
9639 + return pte_set_flags(pte, _PAGE_NX);
9640 + else
9641 +#endif
9642 + return pte_clear_flags(pte, _PAGE_USER);
9643 }
9644
9645 static inline pte_t pte_mkdirty(pte_t pte)
9646 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9647 #endif
9648
9649 #ifndef __ASSEMBLY__
9650 +
9651 +#ifdef CONFIG_PAX_PER_CPU_PGD
9652 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9653 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9654 +{
9655 + return cpu_pgd[cpu];
9656 +}
9657 +#endif
9658 +
9659 #include <linux/mm_types.h>
9660
9661 static inline int pte_none(pte_t pte)
9662 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9663
9664 static inline int pgd_bad(pgd_t pgd)
9665 {
9666 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9667 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9668 }
9669
9670 static inline int pgd_none(pgd_t pgd)
9671 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9672 * pgd_offset() returns a (pgd_t *)
9673 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9674 */
9675 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9676 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9677 +
9678 +#ifdef CONFIG_PAX_PER_CPU_PGD
9679 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9680 +#endif
9681 +
9682 /*
9683 * a shortcut which implies the use of the kernel's pgd, instead
9684 * of a process's
9685 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9686 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9687 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9688
9689 +#ifdef CONFIG_X86_32
9690 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9691 +#else
9692 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9693 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9694 +
9695 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9696 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9697 +#else
9698 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9699 +#endif
9700 +
9701 +#endif
9702 +
9703 #ifndef __ASSEMBLY__
9704
9705 extern int direct_gbpages;
9706 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9707 * dst and src can be on the same page, but the range must not overlap,
9708 * and must not cross a page boundary.
9709 */
9710 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9711 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9712 {
9713 - memcpy(dst, src, count * sizeof(pgd_t));
9714 + pax_open_kernel();
9715 + while (count--)
9716 + *dst++ = *src++;
9717 + pax_close_kernel();
9718 }
9719
9720 +#ifdef CONFIG_PAX_PER_CPU_PGD
9721 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9722 +#endif
9723 +
9724 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9725 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9726 +#else
9727 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9728 +#endif
9729
9730 #include <asm-generic/pgtable.h>
9731 #endif /* __ASSEMBLY__ */
9732 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9733 index 0c92113..34a77c6 100644
9734 --- a/arch/x86/include/asm/pgtable_32.h
9735 +++ b/arch/x86/include/asm/pgtable_32.h
9736 @@ -25,9 +25,6 @@
9737 struct mm_struct;
9738 struct vm_area_struct;
9739
9740 -extern pgd_t swapper_pg_dir[1024];
9741 -extern pgd_t initial_page_table[1024];
9742 -
9743 static inline void pgtable_cache_init(void) { }
9744 static inline void check_pgt_cache(void) { }
9745 void paging_init(void);
9746 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9747 # include <asm/pgtable-2level.h>
9748 #endif
9749
9750 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9751 +extern pgd_t initial_page_table[PTRS_PER_PGD];
9752 +#ifdef CONFIG_X86_PAE
9753 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9754 +#endif
9755 +
9756 #if defined(CONFIG_HIGHPTE)
9757 #define pte_offset_map(dir, address) \
9758 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9759 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9760 /* Clear a kernel PTE and flush it from the TLB */
9761 #define kpte_clear_flush(ptep, vaddr) \
9762 do { \
9763 + pax_open_kernel(); \
9764 pte_clear(&init_mm, (vaddr), (ptep)); \
9765 + pax_close_kernel(); \
9766 __flush_tlb_one((vaddr)); \
9767 } while (0)
9768
9769 @@ -74,6 +79,9 @@ do { \
9770
9771 #endif /* !__ASSEMBLY__ */
9772
9773 +#define HAVE_ARCH_UNMAPPED_AREA
9774 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9775 +
9776 /*
9777 * kern_addr_valid() is (1) for FLATMEM and (0) for
9778 * SPARSEMEM and DISCONTIGMEM
9779 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9780 index ed5903b..c7fe163 100644
9781 --- a/arch/x86/include/asm/pgtable_32_types.h
9782 +++ b/arch/x86/include/asm/pgtable_32_types.h
9783 @@ -8,7 +8,7 @@
9784 */
9785 #ifdef CONFIG_X86_PAE
9786 # include <asm/pgtable-3level_types.h>
9787 -# define PMD_SIZE (1UL << PMD_SHIFT)
9788 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9789 # define PMD_MASK (~(PMD_SIZE - 1))
9790 #else
9791 # include <asm/pgtable-2level_types.h>
9792 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9793 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9794 #endif
9795
9796 +#ifdef CONFIG_PAX_KERNEXEC
9797 +#ifndef __ASSEMBLY__
9798 +extern unsigned char MODULES_EXEC_VADDR[];
9799 +extern unsigned char MODULES_EXEC_END[];
9800 +#endif
9801 +#include <asm/boot.h>
9802 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9803 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9804 +#else
9805 +#define ktla_ktva(addr) (addr)
9806 +#define ktva_ktla(addr) (addr)
9807 +#endif
9808 +
9809 #define MODULES_VADDR VMALLOC_START
9810 #define MODULES_END VMALLOC_END
9811 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9812 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9813 index 975f709..107976d 100644
9814 --- a/arch/x86/include/asm/pgtable_64.h
9815 +++ b/arch/x86/include/asm/pgtable_64.h
9816 @@ -16,10 +16,14 @@
9817
9818 extern pud_t level3_kernel_pgt[512];
9819 extern pud_t level3_ident_pgt[512];
9820 +extern pud_t level3_vmalloc_start_pgt[512];
9821 +extern pud_t level3_vmalloc_end_pgt[512];
9822 +extern pud_t level3_vmemmap_pgt[512];
9823 +extern pud_t level2_vmemmap_pgt[512];
9824 extern pmd_t level2_kernel_pgt[512];
9825 extern pmd_t level2_fixmap_pgt[512];
9826 -extern pmd_t level2_ident_pgt[512];
9827 -extern pgd_t init_level4_pgt[];
9828 +extern pmd_t level2_ident_pgt[512*2];
9829 +extern pgd_t init_level4_pgt[512];
9830
9831 #define swapper_pg_dir init_level4_pgt
9832
9833 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9834
9835 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9836 {
9837 + pax_open_kernel();
9838 *pmdp = pmd;
9839 + pax_close_kernel();
9840 }
9841
9842 static inline void native_pmd_clear(pmd_t *pmd)
9843 @@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9844
9845 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9846 {
9847 + pax_open_kernel();
9848 + *pgdp = pgd;
9849 + pax_close_kernel();
9850 +}
9851 +
9852 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9853 +{
9854 *pgdp = pgd;
9855 }
9856
9857 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9858 index 766ea16..5b96cb3 100644
9859 --- a/arch/x86/include/asm/pgtable_64_types.h
9860 +++ b/arch/x86/include/asm/pgtable_64_types.h
9861 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9862 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9863 #define MODULES_END _AC(0xffffffffff000000, UL)
9864 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9865 +#define MODULES_EXEC_VADDR MODULES_VADDR
9866 +#define MODULES_EXEC_END MODULES_END
9867 +
9868 +#define ktla_ktva(addr) (addr)
9869 +#define ktva_ktla(addr) (addr)
9870
9871 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9872 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9873 index 013286a..8b42f4f 100644
9874 --- a/arch/x86/include/asm/pgtable_types.h
9875 +++ b/arch/x86/include/asm/pgtable_types.h
9876 @@ -16,13 +16,12 @@
9877 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9878 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9879 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9880 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9881 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9882 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9883 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9884 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9885 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9886 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9887 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9888 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9889 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9890 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9891
9892 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9893 @@ -40,7 +39,6 @@
9894 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9895 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9896 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9897 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9898 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9899 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9900 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9901 @@ -57,8 +55,10 @@
9902
9903 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9904 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9905 -#else
9906 +#elif defined(CONFIG_KMEMCHECK)
9907 #define _PAGE_NX (_AT(pteval_t, 0))
9908 +#else
9909 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9910 #endif
9911
9912 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9913 @@ -96,6 +96,9 @@
9914 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9915 _PAGE_ACCESSED)
9916
9917 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9918 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9919 +
9920 #define __PAGE_KERNEL_EXEC \
9921 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9922 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9923 @@ -106,7 +109,7 @@
9924 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9925 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9926 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9927 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9928 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9929 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9930 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9931 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9932 @@ -168,8 +171,8 @@
9933 * bits are combined, this will alow user to access the high address mapped
9934 * VDSO in the presence of CONFIG_COMPAT_VDSO
9935 */
9936 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9937 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9938 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9939 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9940 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9941 #endif
9942
9943 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9944 {
9945 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9946 }
9947 +#endif
9948
9949 +#if PAGETABLE_LEVELS == 3
9950 +#include <asm-generic/pgtable-nopud.h>
9951 +#endif
9952 +
9953 +#if PAGETABLE_LEVELS == 2
9954 +#include <asm-generic/pgtable-nopmd.h>
9955 +#endif
9956 +
9957 +#ifndef __ASSEMBLY__
9958 #if PAGETABLE_LEVELS > 3
9959 typedef struct { pudval_t pud; } pud_t;
9960
9961 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9962 return pud.pud;
9963 }
9964 #else
9965 -#include <asm-generic/pgtable-nopud.h>
9966 -
9967 static inline pudval_t native_pud_val(pud_t pud)
9968 {
9969 return native_pgd_val(pud.pgd);
9970 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9971 return pmd.pmd;
9972 }
9973 #else
9974 -#include <asm-generic/pgtable-nopmd.h>
9975 -
9976 static inline pmdval_t native_pmd_val(pmd_t pmd)
9977 {
9978 return native_pgd_val(pmd.pud.pgd);
9979 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9980
9981 extern pteval_t __supported_pte_mask;
9982 extern void set_nx(void);
9983 -extern int nx_enabled;
9984
9985 #define pgprot_writecombine pgprot_writecombine
9986 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9987 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9988 index b650435..eefa566 100644
9989 --- a/arch/x86/include/asm/processor.h
9990 +++ b/arch/x86/include/asm/processor.h
9991 @@ -268,7 +268,7 @@ struct tss_struct {
9992
9993 } ____cacheline_aligned;
9994
9995 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9996 +extern struct tss_struct init_tss[NR_CPUS];
9997
9998 /*
9999 * Save the original ist values for checking stack pointers during debugging
10000 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
10001 */
10002 #define TASK_SIZE PAGE_OFFSET
10003 #define TASK_SIZE_MAX TASK_SIZE
10004 +
10005 +#ifdef CONFIG_PAX_SEGMEXEC
10006 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
10007 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
10008 +#else
10009 #define STACK_TOP TASK_SIZE
10010 -#define STACK_TOP_MAX STACK_TOP
10011 +#endif
10012 +
10013 +#define STACK_TOP_MAX TASK_SIZE
10014
10015 #define INIT_THREAD { \
10016 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
10017 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10018 .vm86_info = NULL, \
10019 .sysenter_cs = __KERNEL_CS, \
10020 .io_bitmap_ptr = NULL, \
10021 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
10022 */
10023 #define INIT_TSS { \
10024 .x86_tss = { \
10025 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
10026 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10027 .ss0 = __KERNEL_DS, \
10028 .ss1 = __KERNEL_CS, \
10029 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
10030 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
10031 extern unsigned long thread_saved_pc(struct task_struct *tsk);
10032
10033 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
10034 -#define KSTK_TOP(info) \
10035 -({ \
10036 - unsigned long *__ptr = (unsigned long *)(info); \
10037 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
10038 -})
10039 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
10040
10041 /*
10042 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
10043 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10044 #define task_pt_regs(task) \
10045 ({ \
10046 struct pt_regs *__regs__; \
10047 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
10048 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
10049 __regs__ - 1; \
10050 })
10051
10052 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10053 /*
10054 * User space process size. 47bits minus one guard page.
10055 */
10056 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
10057 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
10058
10059 /* This decides where the kernel will search for a free chunk of vm
10060 * space during mmap's.
10061 */
10062 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
10063 - 0xc0000000 : 0xFFFFe000)
10064 + 0xc0000000 : 0xFFFFf000)
10065
10066 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
10067 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
10068 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10069 #define STACK_TOP_MAX TASK_SIZE_MAX
10070
10071 #define INIT_THREAD { \
10072 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10073 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10074 }
10075
10076 #define INIT_TSS { \
10077 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10078 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10079 }
10080
10081 /*
10082 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
10083 */
10084 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
10085
10086 +#ifdef CONFIG_PAX_SEGMEXEC
10087 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
10088 +#endif
10089 +
10090 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
10091
10092 /* Get/set a process' ability to use the timestamp counter instruction */
10093 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
10094 index 3566454..4bdfb8c 100644
10095 --- a/arch/x86/include/asm/ptrace.h
10096 +++ b/arch/x86/include/asm/ptrace.h
10097 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
10098 }
10099
10100 /*
10101 - * user_mode_vm(regs) determines whether a register set came from user mode.
10102 + * user_mode(regs) determines whether a register set came from user mode.
10103 * This is true if V8086 mode was enabled OR if the register set was from
10104 * protected mode with RPL-3 CS value. This tricky test checks that with
10105 * one comparison. Many places in the kernel can bypass this full check
10106 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
10107 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
10108 + * be used.
10109 */
10110 -static inline int user_mode(struct pt_regs *regs)
10111 +static inline int user_mode_novm(struct pt_regs *regs)
10112 {
10113 #ifdef CONFIG_X86_32
10114 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
10115 #else
10116 - return !!(regs->cs & 3);
10117 + return !!(regs->cs & SEGMENT_RPL_MASK);
10118 #endif
10119 }
10120
10121 -static inline int user_mode_vm(struct pt_regs *regs)
10122 +static inline int user_mode(struct pt_regs *regs)
10123 {
10124 #ifdef CONFIG_X86_32
10125 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
10126 USER_RPL;
10127 #else
10128 - return user_mode(regs);
10129 + return user_mode_novm(regs);
10130 #endif
10131 }
10132
10133 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
10134 #ifdef CONFIG_X86_64
10135 static inline bool user_64bit_mode(struct pt_regs *regs)
10136 {
10137 + unsigned long cs = regs->cs & 0xffff;
10138 #ifndef CONFIG_PARAVIRT
10139 /*
10140 * On non-paravirt systems, this is the only long mode CPL 3
10141 * selector. We do not allow long mode selectors in the LDT.
10142 */
10143 - return regs->cs == __USER_CS;
10144 + return cs == __USER_CS;
10145 #else
10146 /* Headers are too twisted for this to go in paravirt.h. */
10147 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
10148 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
10149 #endif
10150 }
10151 #endif
10152 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
10153 index 92f29706..a79cbbb 100644
10154 --- a/arch/x86/include/asm/reboot.h
10155 +++ b/arch/x86/include/asm/reboot.h
10156 @@ -6,19 +6,19 @@
10157 struct pt_regs;
10158
10159 struct machine_ops {
10160 - void (*restart)(char *cmd);
10161 - void (*halt)(void);
10162 - void (*power_off)(void);
10163 + void (* __noreturn restart)(char *cmd);
10164 + void (* __noreturn halt)(void);
10165 + void (* __noreturn power_off)(void);
10166 void (*shutdown)(void);
10167 void (*crash_shutdown)(struct pt_regs *);
10168 - void (*emergency_restart)(void);
10169 -};
10170 + void (* __noreturn emergency_restart)(void);
10171 +} __no_const;
10172
10173 extern struct machine_ops machine_ops;
10174
10175 void native_machine_crash_shutdown(struct pt_regs *regs);
10176 void native_machine_shutdown(void);
10177 -void machine_real_restart(unsigned int type);
10178 +void machine_real_restart(unsigned int type) __noreturn;
10179 /* These must match dispatch_table in reboot_32.S */
10180 #define MRR_BIOS 0
10181 #define MRR_APM 1
10182 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
10183 index 2dbe4a7..ce1db00 100644
10184 --- a/arch/x86/include/asm/rwsem.h
10185 +++ b/arch/x86/include/asm/rwsem.h
10186 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
10187 {
10188 asm volatile("# beginning down_read\n\t"
10189 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10190 +
10191 +#ifdef CONFIG_PAX_REFCOUNT
10192 + "jno 0f\n"
10193 + LOCK_PREFIX _ASM_DEC "(%1)\n"
10194 + "int $4\n0:\n"
10195 + _ASM_EXTABLE(0b, 0b)
10196 +#endif
10197 +
10198 /* adds 0x00000001 */
10199 " jns 1f\n"
10200 " call call_rwsem_down_read_failed\n"
10201 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
10202 "1:\n\t"
10203 " mov %1,%2\n\t"
10204 " add %3,%2\n\t"
10205 +
10206 +#ifdef CONFIG_PAX_REFCOUNT
10207 + "jno 0f\n"
10208 + "sub %3,%2\n"
10209 + "int $4\n0:\n"
10210 + _ASM_EXTABLE(0b, 0b)
10211 +#endif
10212 +
10213 " jle 2f\n\t"
10214 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10215 " jnz 1b\n\t"
10216 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
10217 long tmp;
10218 asm volatile("# beginning down_write\n\t"
10219 LOCK_PREFIX " xadd %1,(%2)\n\t"
10220 +
10221 +#ifdef CONFIG_PAX_REFCOUNT
10222 + "jno 0f\n"
10223 + "mov %1,(%2)\n"
10224 + "int $4\n0:\n"
10225 + _ASM_EXTABLE(0b, 0b)
10226 +#endif
10227 +
10228 /* adds 0xffff0001, returns the old value */
10229 " test %1,%1\n\t"
10230 /* was the count 0 before? */
10231 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
10232 long tmp;
10233 asm volatile("# beginning __up_read\n\t"
10234 LOCK_PREFIX " xadd %1,(%2)\n\t"
10235 +
10236 +#ifdef CONFIG_PAX_REFCOUNT
10237 + "jno 0f\n"
10238 + "mov %1,(%2)\n"
10239 + "int $4\n0:\n"
10240 + _ASM_EXTABLE(0b, 0b)
10241 +#endif
10242 +
10243 /* subtracts 1, returns the old value */
10244 " jns 1f\n\t"
10245 " call call_rwsem_wake\n" /* expects old value in %edx */
10246 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
10247 long tmp;
10248 asm volatile("# beginning __up_write\n\t"
10249 LOCK_PREFIX " xadd %1,(%2)\n\t"
10250 +
10251 +#ifdef CONFIG_PAX_REFCOUNT
10252 + "jno 0f\n"
10253 + "mov %1,(%2)\n"
10254 + "int $4\n0:\n"
10255 + _ASM_EXTABLE(0b, 0b)
10256 +#endif
10257 +
10258 /* subtracts 0xffff0001, returns the old value */
10259 " jns 1f\n\t"
10260 " call call_rwsem_wake\n" /* expects old value in %edx */
10261 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10262 {
10263 asm volatile("# beginning __downgrade_write\n\t"
10264 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10265 +
10266 +#ifdef CONFIG_PAX_REFCOUNT
10267 + "jno 0f\n"
10268 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10269 + "int $4\n0:\n"
10270 + _ASM_EXTABLE(0b, 0b)
10271 +#endif
10272 +
10273 /*
10274 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10275 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10276 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10277 */
10278 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10279 {
10280 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10281 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10282 +
10283 +#ifdef CONFIG_PAX_REFCOUNT
10284 + "jno 0f\n"
10285 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10286 + "int $4\n0:\n"
10287 + _ASM_EXTABLE(0b, 0b)
10288 +#endif
10289 +
10290 : "+m" (sem->count)
10291 : "er" (delta));
10292 }
10293 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10294 */
10295 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
10296 {
10297 - return delta + xadd(&sem->count, delta);
10298 + return delta + xadd_check_overflow(&sem->count, delta);
10299 }
10300
10301 #endif /* __KERNEL__ */
10302 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
10303 index 5e64171..f58957e 100644
10304 --- a/arch/x86/include/asm/segment.h
10305 +++ b/arch/x86/include/asm/segment.h
10306 @@ -64,10 +64,15 @@
10307 * 26 - ESPFIX small SS
10308 * 27 - per-cpu [ offset to per-cpu data area ]
10309 * 28 - stack_canary-20 [ for stack protector ]
10310 - * 29 - unused
10311 - * 30 - unused
10312 + * 29 - PCI BIOS CS
10313 + * 30 - PCI BIOS DS
10314 * 31 - TSS for double fault handler
10315 */
10316 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
10317 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
10318 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
10319 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
10320 +
10321 #define GDT_ENTRY_TLS_MIN 6
10322 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
10323
10324 @@ -79,6 +84,8 @@
10325
10326 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
10327
10328 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10329 +
10330 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
10331
10332 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
10333 @@ -104,6 +111,12 @@
10334 #define __KERNEL_STACK_CANARY 0
10335 #endif
10336
10337 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
10338 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10339 +
10340 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
10341 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10342 +
10343 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10344
10345 /*
10346 @@ -141,7 +154,7 @@
10347 */
10348
10349 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10350 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10351 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10352
10353
10354 #else
10355 @@ -165,6 +178,8 @@
10356 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10357 #define __USER32_DS __USER_DS
10358
10359 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10360 +
10361 #define GDT_ENTRY_TSS 8 /* needs two entries */
10362 #define GDT_ENTRY_LDT 10 /* needs two entries */
10363 #define GDT_ENTRY_TLS_MIN 12
10364 @@ -185,6 +200,7 @@
10365 #endif
10366
10367 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10368 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10369 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10370 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10371 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10372 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10373 index 73b11bc..d4a3b63 100644
10374 --- a/arch/x86/include/asm/smp.h
10375 +++ b/arch/x86/include/asm/smp.h
10376 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10377 /* cpus sharing the last level cache: */
10378 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10379 DECLARE_PER_CPU(u16, cpu_llc_id);
10380 -DECLARE_PER_CPU(int, cpu_number);
10381 +DECLARE_PER_CPU(unsigned int, cpu_number);
10382
10383 static inline struct cpumask *cpu_sibling_mask(int cpu)
10384 {
10385 @@ -77,7 +77,7 @@ struct smp_ops {
10386
10387 void (*send_call_func_ipi)(const struct cpumask *mask);
10388 void (*send_call_func_single_ipi)(int cpu);
10389 -};
10390 +} __no_const;
10391
10392 /* Globals due to paravirt */
10393 extern void set_cpu_sibling_map(int cpu);
10394 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10395 extern int safe_smp_processor_id(void);
10396
10397 #elif defined(CONFIG_X86_64_SMP)
10398 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10399 -
10400 -#define stack_smp_processor_id() \
10401 -({ \
10402 - struct thread_info *ti; \
10403 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10404 - ti->cpu; \
10405 -})
10406 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10407 +#define stack_smp_processor_id() raw_smp_processor_id()
10408 #define safe_smp_processor_id() smp_processor_id()
10409
10410 #endif
10411 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10412 index 972c260..43ab1fd 100644
10413 --- a/arch/x86/include/asm/spinlock.h
10414 +++ b/arch/x86/include/asm/spinlock.h
10415 @@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10416 static inline void arch_read_lock(arch_rwlock_t *rw)
10417 {
10418 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10419 +
10420 +#ifdef CONFIG_PAX_REFCOUNT
10421 + "jno 0f\n"
10422 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10423 + "int $4\n0:\n"
10424 + _ASM_EXTABLE(0b, 0b)
10425 +#endif
10426 +
10427 "jns 1f\n"
10428 "call __read_lock_failed\n\t"
10429 "1:\n"
10430 @@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10431 static inline void arch_write_lock(arch_rwlock_t *rw)
10432 {
10433 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10434 +
10435 +#ifdef CONFIG_PAX_REFCOUNT
10436 + "jno 0f\n"
10437 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10438 + "int $4\n0:\n"
10439 + _ASM_EXTABLE(0b, 0b)
10440 +#endif
10441 +
10442 "jz 1f\n"
10443 "call __write_lock_failed\n\t"
10444 "1:\n"
10445 @@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10446
10447 static inline void arch_read_unlock(arch_rwlock_t *rw)
10448 {
10449 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10450 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10451 +
10452 +#ifdef CONFIG_PAX_REFCOUNT
10453 + "jno 0f\n"
10454 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10455 + "int $4\n0:\n"
10456 + _ASM_EXTABLE(0b, 0b)
10457 +#endif
10458 +
10459 :"+m" (rw->lock) : : "memory");
10460 }
10461
10462 static inline void arch_write_unlock(arch_rwlock_t *rw)
10463 {
10464 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10465 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10466 +
10467 +#ifdef CONFIG_PAX_REFCOUNT
10468 + "jno 0f\n"
10469 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10470 + "int $4\n0:\n"
10471 + _ASM_EXTABLE(0b, 0b)
10472 +#endif
10473 +
10474 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10475 }
10476
10477 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10478 index 1575177..cb23f52 100644
10479 --- a/arch/x86/include/asm/stackprotector.h
10480 +++ b/arch/x86/include/asm/stackprotector.h
10481 @@ -48,7 +48,7 @@
10482 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10483 */
10484 #define GDT_STACK_CANARY_INIT \
10485 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10486 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10487
10488 /*
10489 * Initialize the stackprotector canary value.
10490 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10491
10492 static inline void load_stack_canary_segment(void)
10493 {
10494 -#ifdef CONFIG_X86_32
10495 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10496 asm volatile ("mov %0, %%gs" : : "r" (0));
10497 #endif
10498 }
10499 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10500 index 70bbe39..4ae2bd4 100644
10501 --- a/arch/x86/include/asm/stacktrace.h
10502 +++ b/arch/x86/include/asm/stacktrace.h
10503 @@ -11,28 +11,20 @@
10504
10505 extern int kstack_depth_to_print;
10506
10507 -struct thread_info;
10508 +struct task_struct;
10509 struct stacktrace_ops;
10510
10511 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10512 - unsigned long *stack,
10513 - unsigned long bp,
10514 - const struct stacktrace_ops *ops,
10515 - void *data,
10516 - unsigned long *end,
10517 - int *graph);
10518 +typedef unsigned long walk_stack_t(struct task_struct *task,
10519 + void *stack_start,
10520 + unsigned long *stack,
10521 + unsigned long bp,
10522 + const struct stacktrace_ops *ops,
10523 + void *data,
10524 + unsigned long *end,
10525 + int *graph);
10526
10527 -extern unsigned long
10528 -print_context_stack(struct thread_info *tinfo,
10529 - unsigned long *stack, unsigned long bp,
10530 - const struct stacktrace_ops *ops, void *data,
10531 - unsigned long *end, int *graph);
10532 -
10533 -extern unsigned long
10534 -print_context_stack_bp(struct thread_info *tinfo,
10535 - unsigned long *stack, unsigned long bp,
10536 - const struct stacktrace_ops *ops, void *data,
10537 - unsigned long *end, int *graph);
10538 +extern walk_stack_t print_context_stack;
10539 +extern walk_stack_t print_context_stack_bp;
10540
10541 /* Generic stack tracer with callbacks */
10542
10543 @@ -40,7 +32,7 @@ struct stacktrace_ops {
10544 void (*address)(void *data, unsigned long address, int reliable);
10545 /* On negative return stop dumping */
10546 int (*stack)(void *data, char *name);
10547 - walk_stack_t walk_stack;
10548 + walk_stack_t *walk_stack;
10549 };
10550
10551 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10552 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10553 index cb23852..2dde194 100644
10554 --- a/arch/x86/include/asm/sys_ia32.h
10555 +++ b/arch/x86/include/asm/sys_ia32.h
10556 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10557 compat_sigset_t __user *, unsigned int);
10558 asmlinkage long sys32_alarm(unsigned int);
10559
10560 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10561 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10562 asmlinkage long sys32_sysfs(int, u32, u32);
10563
10564 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10565 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10566 index 2d2f01c..f985723 100644
10567 --- a/arch/x86/include/asm/system.h
10568 +++ b/arch/x86/include/asm/system.h
10569 @@ -129,7 +129,7 @@ do { \
10570 "call __switch_to\n\t" \
10571 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10572 __switch_canary \
10573 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10574 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10575 "movq %%rax,%%rdi\n\t" \
10576 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10577 "jnz ret_from_fork\n\t" \
10578 @@ -140,7 +140,7 @@ do { \
10579 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10580 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10581 [_tif_fork] "i" (_TIF_FORK), \
10582 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10583 + [thread_info] "m" (current_tinfo), \
10584 [current_task] "m" (current_task) \
10585 __switch_canary_iparam \
10586 : "memory", "cc" __EXTRA_CLOBBER)
10587 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10588 {
10589 unsigned long __limit;
10590 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10591 - return __limit + 1;
10592 + return __limit;
10593 }
10594
10595 static inline void native_clts(void)
10596 @@ -397,13 +397,13 @@ void enable_hlt(void);
10597
10598 void cpu_idle_wait(void);
10599
10600 -extern unsigned long arch_align_stack(unsigned long sp);
10601 +#define arch_align_stack(x) ((x) & ~0xfUL)
10602 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10603
10604 void default_idle(void);
10605 bool set_pm_idle_to_default(void);
10606
10607 -void stop_this_cpu(void *dummy);
10608 +void stop_this_cpu(void *dummy) __noreturn;
10609
10610 /*
10611 * Force strict CPU ordering.
10612 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10613 index a1fe5c1..ee326d8 100644
10614 --- a/arch/x86/include/asm/thread_info.h
10615 +++ b/arch/x86/include/asm/thread_info.h
10616 @@ -10,6 +10,7 @@
10617 #include <linux/compiler.h>
10618 #include <asm/page.h>
10619 #include <asm/types.h>
10620 +#include <asm/percpu.h>
10621
10622 /*
10623 * low level task data that entry.S needs immediate access to
10624 @@ -24,7 +25,6 @@ struct exec_domain;
10625 #include <linux/atomic.h>
10626
10627 struct thread_info {
10628 - struct task_struct *task; /* main task structure */
10629 struct exec_domain *exec_domain; /* execution domain */
10630 __u32 flags; /* low level flags */
10631 __u32 status; /* thread synchronous flags */
10632 @@ -34,18 +34,12 @@ struct thread_info {
10633 mm_segment_t addr_limit;
10634 struct restart_block restart_block;
10635 void __user *sysenter_return;
10636 -#ifdef CONFIG_X86_32
10637 - unsigned long previous_esp; /* ESP of the previous stack in
10638 - case of nested (IRQ) stacks
10639 - */
10640 - __u8 supervisor_stack[0];
10641 -#endif
10642 + unsigned long lowest_stack;
10643 int uaccess_err;
10644 };
10645
10646 -#define INIT_THREAD_INFO(tsk) \
10647 +#define INIT_THREAD_INFO \
10648 { \
10649 - .task = &tsk, \
10650 .exec_domain = &default_exec_domain, \
10651 .flags = 0, \
10652 .cpu = 0, \
10653 @@ -56,7 +50,7 @@ struct thread_info {
10654 }, \
10655 }
10656
10657 -#define init_thread_info (init_thread_union.thread_info)
10658 +#define init_thread_info (init_thread_union.stack)
10659 #define init_stack (init_thread_union.stack)
10660
10661 #else /* !__ASSEMBLY__ */
10662 @@ -170,45 +164,40 @@ struct thread_info {
10663 ret; \
10664 })
10665
10666 -#ifdef CONFIG_X86_32
10667 -
10668 -#define STACK_WARN (THREAD_SIZE/8)
10669 -/*
10670 - * macros/functions for gaining access to the thread information structure
10671 - *
10672 - * preempt_count needs to be 1 initially, until the scheduler is functional.
10673 - */
10674 -#ifndef __ASSEMBLY__
10675 -
10676 -
10677 -/* how to get the current stack pointer from C */
10678 -register unsigned long current_stack_pointer asm("esp") __used;
10679 -
10680 -/* how to get the thread information struct from C */
10681 -static inline struct thread_info *current_thread_info(void)
10682 -{
10683 - return (struct thread_info *)
10684 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10685 -}
10686 -
10687 -#else /* !__ASSEMBLY__ */
10688 -
10689 +#ifdef __ASSEMBLY__
10690 /* how to get the thread information struct from ASM */
10691 #define GET_THREAD_INFO(reg) \
10692 - movl $-THREAD_SIZE, reg; \
10693 - andl %esp, reg
10694 + mov PER_CPU_VAR(current_tinfo), reg
10695
10696 /* use this one if reg already contains %esp */
10697 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10698 - andl $-THREAD_SIZE, reg
10699 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10700 +#else
10701 +/* how to get the thread information struct from C */
10702 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10703 +
10704 +static __always_inline struct thread_info *current_thread_info(void)
10705 +{
10706 + return percpu_read_stable(current_tinfo);
10707 +}
10708 +#endif
10709 +
10710 +#ifdef CONFIG_X86_32
10711 +
10712 +#define STACK_WARN (THREAD_SIZE/8)
10713 +/*
10714 + * macros/functions for gaining access to the thread information structure
10715 + *
10716 + * preempt_count needs to be 1 initially, until the scheduler is functional.
10717 + */
10718 +#ifndef __ASSEMBLY__
10719 +
10720 +/* how to get the current stack pointer from C */
10721 +register unsigned long current_stack_pointer asm("esp") __used;
10722
10723 #endif
10724
10725 #else /* X86_32 */
10726
10727 -#include <asm/percpu.h>
10728 -#define KERNEL_STACK_OFFSET (5*8)
10729 -
10730 /*
10731 * macros/functions for gaining access to the thread information structure
10732 * preempt_count needs to be 1 initially, until the scheduler is functional.
10733 @@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10734 #ifndef __ASSEMBLY__
10735 DECLARE_PER_CPU(unsigned long, kernel_stack);
10736
10737 -static inline struct thread_info *current_thread_info(void)
10738 -{
10739 - struct thread_info *ti;
10740 - ti = (void *)(percpu_read_stable(kernel_stack) +
10741 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10742 - return ti;
10743 -}
10744 -
10745 -#else /* !__ASSEMBLY__ */
10746 -
10747 -/* how to get the thread information struct from ASM */
10748 -#define GET_THREAD_INFO(reg) \
10749 - movq PER_CPU_VAR(kernel_stack),reg ; \
10750 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10751 -
10752 +/* how to get the current stack pointer from C */
10753 +register unsigned long current_stack_pointer asm("rsp") __used;
10754 #endif
10755
10756 #endif /* !X86_32 */
10757 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
10758 extern void free_thread_info(struct thread_info *ti);
10759 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10760 #define arch_task_cache_init arch_task_cache_init
10761 +
10762 +#define __HAVE_THREAD_FUNCTIONS
10763 +#define task_thread_info(task) (&(task)->tinfo)
10764 +#define task_stack_page(task) ((task)->stack)
10765 +#define setup_thread_stack(p, org) do {} while (0)
10766 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10767 +
10768 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10769 +extern struct task_struct *alloc_task_struct_node(int node);
10770 +extern void free_task_struct(struct task_struct *);
10771 +
10772 #endif
10773 #endif /* _ASM_X86_THREAD_INFO_H */
10774 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10775 index 36361bf..324f262 100644
10776 --- a/arch/x86/include/asm/uaccess.h
10777 +++ b/arch/x86/include/asm/uaccess.h
10778 @@ -7,12 +7,15 @@
10779 #include <linux/compiler.h>
10780 #include <linux/thread_info.h>
10781 #include <linux/string.h>
10782 +#include <linux/sched.h>
10783 #include <asm/asm.h>
10784 #include <asm/page.h>
10785
10786 #define VERIFY_READ 0
10787 #define VERIFY_WRITE 1
10788
10789 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10790 +
10791 /*
10792 * The fs value determines whether argument validity checking should be
10793 * performed or not. If get_fs() == USER_DS, checking is performed, with
10794 @@ -28,7 +31,12 @@
10795
10796 #define get_ds() (KERNEL_DS)
10797 #define get_fs() (current_thread_info()->addr_limit)
10798 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10799 +void __set_fs(mm_segment_t x);
10800 +void set_fs(mm_segment_t x);
10801 +#else
10802 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10803 +#endif
10804
10805 #define segment_eq(a, b) ((a).seg == (b).seg)
10806
10807 @@ -76,7 +84,33 @@
10808 * checks that the pointer is in the user space range - after calling
10809 * this function, memory access functions may still return -EFAULT.
10810 */
10811 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10812 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10813 +#define access_ok(type, addr, size) \
10814 +({ \
10815 + long __size = size; \
10816 + unsigned long __addr = (unsigned long)addr; \
10817 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10818 + unsigned long __end_ao = __addr + __size - 1; \
10819 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10820 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10821 + while(__addr_ao <= __end_ao) { \
10822 + char __c_ao; \
10823 + __addr_ao += PAGE_SIZE; \
10824 + if (__size > PAGE_SIZE) \
10825 + cond_resched(); \
10826 + if (__get_user(__c_ao, (char __user *)__addr)) \
10827 + break; \
10828 + if (type != VERIFY_WRITE) { \
10829 + __addr = __addr_ao; \
10830 + continue; \
10831 + } \
10832 + if (__put_user(__c_ao, (char __user *)__addr)) \
10833 + break; \
10834 + __addr = __addr_ao; \
10835 + } \
10836 + } \
10837 + __ret_ao; \
10838 +})
10839
10840 /*
10841 * The exception table consists of pairs of addresses: the first is the
10842 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10843 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10844 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10845
10846 -
10847 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10848 +#define __copyuser_seg "gs;"
10849 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10850 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10851 +#else
10852 +#define __copyuser_seg
10853 +#define __COPYUSER_SET_ES
10854 +#define __COPYUSER_RESTORE_ES
10855 +#endif
10856
10857 #ifdef CONFIG_X86_32
10858 #define __put_user_asm_u64(x, addr, err, errret) \
10859 - asm volatile("1: movl %%eax,0(%2)\n" \
10860 - "2: movl %%edx,4(%2)\n" \
10861 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10862 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10863 "3:\n" \
10864 ".section .fixup,\"ax\"\n" \
10865 "4: movl %3,%0\n" \
10866 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10867 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10868
10869 #define __put_user_asm_ex_u64(x, addr) \
10870 - asm volatile("1: movl %%eax,0(%1)\n" \
10871 - "2: movl %%edx,4(%1)\n" \
10872 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10873 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10874 "3:\n" \
10875 _ASM_EXTABLE(1b, 2b - 1b) \
10876 _ASM_EXTABLE(2b, 3b - 2b) \
10877 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
10878 __typeof__(*(ptr)) __pu_val; \
10879 __chk_user_ptr(ptr); \
10880 might_fault(); \
10881 - __pu_val = x; \
10882 + __pu_val = (x); \
10883 switch (sizeof(*(ptr))) { \
10884 case 1: \
10885 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10886 @@ -373,7 +415,7 @@ do { \
10887 } while (0)
10888
10889 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10890 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10891 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10892 "2:\n" \
10893 ".section .fixup,\"ax\"\n" \
10894 "3: mov %3,%0\n" \
10895 @@ -381,7 +423,7 @@ do { \
10896 " jmp 2b\n" \
10897 ".previous\n" \
10898 _ASM_EXTABLE(1b, 3b) \
10899 - : "=r" (err), ltype(x) \
10900 + : "=r" (err), ltype (x) \
10901 : "m" (__m(addr)), "i" (errret), "0" (err))
10902
10903 #define __get_user_size_ex(x, ptr, size) \
10904 @@ -406,7 +448,7 @@ do { \
10905 } while (0)
10906
10907 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10908 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10909 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10910 "2:\n" \
10911 _ASM_EXTABLE(1b, 2b - 1b) \
10912 : ltype(x) : "m" (__m(addr)))
10913 @@ -423,13 +465,24 @@ do { \
10914 int __gu_err; \
10915 unsigned long __gu_val; \
10916 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10917 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10918 + (x) = (__typeof__(*(ptr)))__gu_val; \
10919 __gu_err; \
10920 })
10921
10922 /* FIXME: this hack is definitely wrong -AK */
10923 struct __large_struct { unsigned long buf[100]; };
10924 -#define __m(x) (*(struct __large_struct __user *)(x))
10925 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10926 +#define ____m(x) \
10927 +({ \
10928 + unsigned long ____x = (unsigned long)(x); \
10929 + if (____x < PAX_USER_SHADOW_BASE) \
10930 + ____x += PAX_USER_SHADOW_BASE; \
10931 + (void __user *)____x; \
10932 +})
10933 +#else
10934 +#define ____m(x) (x)
10935 +#endif
10936 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10937
10938 /*
10939 * Tell gcc we read from memory instead of writing: this is because
10940 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10941 * aliasing issues.
10942 */
10943 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10944 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10945 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10946 "2:\n" \
10947 ".section .fixup,\"ax\"\n" \
10948 "3: mov %3,%0\n" \
10949 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10950 ".previous\n" \
10951 _ASM_EXTABLE(1b, 3b) \
10952 : "=r"(err) \
10953 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10954 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10955
10956 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10957 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10958 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10959 "2:\n" \
10960 _ASM_EXTABLE(1b, 2b - 1b) \
10961 : : ltype(x), "m" (__m(addr)))
10962 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10963 * On error, the variable @x is set to zero.
10964 */
10965
10966 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10967 +#define __get_user(x, ptr) get_user((x), (ptr))
10968 +#else
10969 #define __get_user(x, ptr) \
10970 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10971 +#endif
10972
10973 /**
10974 * __put_user: - Write a simple value into user space, with less checking.
10975 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10976 * Returns zero on success, or -EFAULT on error.
10977 */
10978
10979 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10980 +#define __put_user(x, ptr) put_user((x), (ptr))
10981 +#else
10982 #define __put_user(x, ptr) \
10983 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10984 +#endif
10985
10986 #define __get_user_unaligned __get_user
10987 #define __put_user_unaligned __put_user
10988 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10989 #define get_user_ex(x, ptr) do { \
10990 unsigned long __gue_val; \
10991 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10992 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10993 + (x) = (__typeof__(*(ptr)))__gue_val; \
10994 } while (0)
10995
10996 #ifdef CONFIG_X86_WP_WORKS_OK
10997 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10998 index 566e803..b9521e9 100644
10999 --- a/arch/x86/include/asm/uaccess_32.h
11000 +++ b/arch/x86/include/asm/uaccess_32.h
11001 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
11002 static __always_inline unsigned long __must_check
11003 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
11004 {
11005 + if ((long)n < 0)
11006 + return n;
11007 +
11008 if (__builtin_constant_p(n)) {
11009 unsigned long ret;
11010
11011 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
11012 return ret;
11013 }
11014 }
11015 + if (!__builtin_constant_p(n))
11016 + check_object_size(from, n, true);
11017 return __copy_to_user_ll(to, from, n);
11018 }
11019
11020 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
11021 __copy_to_user(void __user *to, const void *from, unsigned long n)
11022 {
11023 might_fault();
11024 +
11025 return __copy_to_user_inatomic(to, from, n);
11026 }
11027
11028 static __always_inline unsigned long
11029 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
11030 {
11031 + if ((long)n < 0)
11032 + return n;
11033 +
11034 /* Avoid zeroing the tail if the copy fails..
11035 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
11036 * but as the zeroing behaviour is only significant when n is not
11037 @@ -137,6 +146,10 @@ static __always_inline unsigned long
11038 __copy_from_user(void *to, const void __user *from, unsigned long n)
11039 {
11040 might_fault();
11041 +
11042 + if ((long)n < 0)
11043 + return n;
11044 +
11045 if (__builtin_constant_p(n)) {
11046 unsigned long ret;
11047
11048 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
11049 return ret;
11050 }
11051 }
11052 + if (!__builtin_constant_p(n))
11053 + check_object_size(to, n, false);
11054 return __copy_from_user_ll(to, from, n);
11055 }
11056
11057 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
11058 const void __user *from, unsigned long n)
11059 {
11060 might_fault();
11061 +
11062 + if ((long)n < 0)
11063 + return n;
11064 +
11065 if (__builtin_constant_p(n)) {
11066 unsigned long ret;
11067
11068 @@ -181,15 +200,19 @@ static __always_inline unsigned long
11069 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
11070 unsigned long n)
11071 {
11072 - return __copy_from_user_ll_nocache_nozero(to, from, n);
11073 + if ((long)n < 0)
11074 + return n;
11075 +
11076 + return __copy_from_user_ll_nocache_nozero(to, from, n);
11077 }
11078
11079 -unsigned long __must_check copy_to_user(void __user *to,
11080 - const void *from, unsigned long n);
11081 -unsigned long __must_check _copy_from_user(void *to,
11082 - const void __user *from,
11083 - unsigned long n);
11084 -
11085 +extern void copy_to_user_overflow(void)
11086 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
11087 + __compiletime_error("copy_to_user() buffer size is not provably correct")
11088 +#else
11089 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
11090 +#endif
11091 +;
11092
11093 extern void copy_from_user_overflow(void)
11094 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
11095 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
11096 #endif
11097 ;
11098
11099 -static inline unsigned long __must_check copy_from_user(void *to,
11100 - const void __user *from,
11101 - unsigned long n)
11102 +/**
11103 + * copy_to_user: - Copy a block of data into user space.
11104 + * @to: Destination address, in user space.
11105 + * @from: Source address, in kernel space.
11106 + * @n: Number of bytes to copy.
11107 + *
11108 + * Context: User context only. This function may sleep.
11109 + *
11110 + * Copy data from kernel space to user space.
11111 + *
11112 + * Returns number of bytes that could not be copied.
11113 + * On success, this will be zero.
11114 + */
11115 +static inline unsigned long __must_check
11116 +copy_to_user(void __user *to, const void *from, unsigned long n)
11117 +{
11118 + int sz = __compiletime_object_size(from);
11119 +
11120 + if (unlikely(sz != -1 && sz < n))
11121 + copy_to_user_overflow();
11122 + else if (access_ok(VERIFY_WRITE, to, n))
11123 + n = __copy_to_user(to, from, n);
11124 + return n;
11125 +}
11126 +
11127 +/**
11128 + * copy_from_user: - Copy a block of data from user space.
11129 + * @to: Destination address, in kernel space.
11130 + * @from: Source address, in user space.
11131 + * @n: Number of bytes to copy.
11132 + *
11133 + * Context: User context only. This function may sleep.
11134 + *
11135 + * Copy data from user space to kernel space.
11136 + *
11137 + * Returns number of bytes that could not be copied.
11138 + * On success, this will be zero.
11139 + *
11140 + * If some data could not be copied, this function will pad the copied
11141 + * data to the requested size using zero bytes.
11142 + */
11143 +static inline unsigned long __must_check
11144 +copy_from_user(void *to, const void __user *from, unsigned long n)
11145 {
11146 int sz = __compiletime_object_size(to);
11147
11148 - if (likely(sz == -1 || sz >= n))
11149 - n = _copy_from_user(to, from, n);
11150 - else
11151 + if (unlikely(sz != -1 && sz < n))
11152 copy_from_user_overflow();
11153 -
11154 + else if (access_ok(VERIFY_READ, from, n))
11155 + n = __copy_from_user(to, from, n);
11156 + else if ((long)n > 0) {
11157 + if (!__builtin_constant_p(n))
11158 + check_object_size(to, n, false);
11159 + memset(to, 0, n);
11160 + }
11161 return n;
11162 }
11163
11164 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
11165 index 1c66d30..e66922c 100644
11166 --- a/arch/x86/include/asm/uaccess_64.h
11167 +++ b/arch/x86/include/asm/uaccess_64.h
11168 @@ -10,6 +10,9 @@
11169 #include <asm/alternative.h>
11170 #include <asm/cpufeature.h>
11171 #include <asm/page.h>
11172 +#include <asm/pgtable.h>
11173 +
11174 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
11175
11176 /*
11177 * Copy To/From Userspace
11178 @@ -17,12 +20,12 @@
11179
11180 /* Handles exceptions in both to and from, but doesn't do access_ok */
11181 __must_check unsigned long
11182 -copy_user_generic_string(void *to, const void *from, unsigned len);
11183 +copy_user_generic_string(void *to, const void *from, unsigned long len);
11184 __must_check unsigned long
11185 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
11186 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
11187
11188 static __always_inline __must_check unsigned long
11189 -copy_user_generic(void *to, const void *from, unsigned len)
11190 +copy_user_generic(void *to, const void *from, unsigned long len)
11191 {
11192 unsigned ret;
11193
11194 @@ -32,142 +35,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
11195 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
11196 "=d" (len)),
11197 "1" (to), "2" (from), "3" (len)
11198 - : "memory", "rcx", "r8", "r9", "r10", "r11");
11199 + : "memory", "rcx", "r8", "r9", "r11");
11200 return ret;
11201 }
11202
11203 +static __always_inline __must_check unsigned long
11204 +__copy_to_user(void __user *to, const void *from, unsigned long len);
11205 +static __always_inline __must_check unsigned long
11206 +__copy_from_user(void *to, const void __user *from, unsigned long len);
11207 __must_check unsigned long
11208 -_copy_to_user(void __user *to, const void *from, unsigned len);
11209 -__must_check unsigned long
11210 -_copy_from_user(void *to, const void __user *from, unsigned len);
11211 -__must_check unsigned long
11212 -copy_in_user(void __user *to, const void __user *from, unsigned len);
11213 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
11214
11215 static inline unsigned long __must_check copy_from_user(void *to,
11216 const void __user *from,
11217 unsigned long n)
11218 {
11219 - int sz = __compiletime_object_size(to);
11220 -
11221 might_fault();
11222 - if (likely(sz == -1 || sz >= n))
11223 - n = _copy_from_user(to, from, n);
11224 -#ifdef CONFIG_DEBUG_VM
11225 - else
11226 - WARN(1, "Buffer overflow detected!\n");
11227 -#endif
11228 +
11229 + if (access_ok(VERIFY_READ, from, n))
11230 + n = __copy_from_user(to, from, n);
11231 + else if (n < INT_MAX) {
11232 + if (!__builtin_constant_p(n))
11233 + check_object_size(to, n, false);
11234 + memset(to, 0, n);
11235 + }
11236 return n;
11237 }
11238
11239 static __always_inline __must_check
11240 -int copy_to_user(void __user *dst, const void *src, unsigned size)
11241 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
11242 {
11243 might_fault();
11244
11245 - return _copy_to_user(dst, src, size);
11246 + if (access_ok(VERIFY_WRITE, dst, size))
11247 + size = __copy_to_user(dst, src, size);
11248 + return size;
11249 }
11250
11251 static __always_inline __must_check
11252 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
11253 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
11254 {
11255 - int ret = 0;
11256 + int sz = __compiletime_object_size(dst);
11257 + unsigned ret = 0;
11258
11259 might_fault();
11260 - if (!__builtin_constant_p(size))
11261 - return copy_user_generic(dst, (__force void *)src, size);
11262 +
11263 + if (size > INT_MAX)
11264 + return size;
11265 +
11266 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11267 + if (!__access_ok(VERIFY_READ, src, size))
11268 + return size;
11269 +#endif
11270 +
11271 + if (unlikely(sz != -1 && sz < size)) {
11272 +#ifdef CONFIG_DEBUG_VM
11273 + WARN(1, "Buffer overflow detected!\n");
11274 +#endif
11275 + return size;
11276 + }
11277 +
11278 + if (!__builtin_constant_p(size)) {
11279 + check_object_size(dst, size, false);
11280 +
11281 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11282 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11283 + src += PAX_USER_SHADOW_BASE;
11284 +#endif
11285 +
11286 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11287 + }
11288 switch (size) {
11289 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
11290 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
11291 ret, "b", "b", "=q", 1);
11292 return ret;
11293 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
11294 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
11295 ret, "w", "w", "=r", 2);
11296 return ret;
11297 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
11298 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
11299 ret, "l", "k", "=r", 4);
11300 return ret;
11301 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
11302 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11303 ret, "q", "", "=r", 8);
11304 return ret;
11305 case 10:
11306 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11307 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11308 ret, "q", "", "=r", 10);
11309 if (unlikely(ret))
11310 return ret;
11311 __get_user_asm(*(u16 *)(8 + (char *)dst),
11312 - (u16 __user *)(8 + (char __user *)src),
11313 + (const u16 __user *)(8 + (const char __user *)src),
11314 ret, "w", "w", "=r", 2);
11315 return ret;
11316 case 16:
11317 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11318 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11319 ret, "q", "", "=r", 16);
11320 if (unlikely(ret))
11321 return ret;
11322 __get_user_asm(*(u64 *)(8 + (char *)dst),
11323 - (u64 __user *)(8 + (char __user *)src),
11324 + (const u64 __user *)(8 + (const char __user *)src),
11325 ret, "q", "", "=r", 8);
11326 return ret;
11327 default:
11328 - return copy_user_generic(dst, (__force void *)src, size);
11329 +
11330 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11331 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11332 + src += PAX_USER_SHADOW_BASE;
11333 +#endif
11334 +
11335 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11336 }
11337 }
11338
11339 static __always_inline __must_check
11340 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
11341 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
11342 {
11343 - int ret = 0;
11344 + int sz = __compiletime_object_size(src);
11345 + unsigned ret = 0;
11346
11347 might_fault();
11348 - if (!__builtin_constant_p(size))
11349 - return copy_user_generic((__force void *)dst, src, size);
11350 +
11351 + if (size > INT_MAX)
11352 + return size;
11353 +
11354 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11355 + if (!__access_ok(VERIFY_WRITE, dst, size))
11356 + return size;
11357 +#endif
11358 +
11359 + if (unlikely(sz != -1 && sz < size)) {
11360 +#ifdef CONFIG_DEBUG_VM
11361 + WARN(1, "Buffer overflow detected!\n");
11362 +#endif
11363 + return size;
11364 + }
11365 +
11366 + if (!__builtin_constant_p(size)) {
11367 + check_object_size(src, size, true);
11368 +
11369 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11370 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11371 + dst += PAX_USER_SHADOW_BASE;
11372 +#endif
11373 +
11374 + return copy_user_generic((__force_kernel void *)dst, src, size);
11375 + }
11376 switch (size) {
11377 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11378 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11379 ret, "b", "b", "iq", 1);
11380 return ret;
11381 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11382 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11383 ret, "w", "w", "ir", 2);
11384 return ret;
11385 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11386 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11387 ret, "l", "k", "ir", 4);
11388 return ret;
11389 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11390 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11391 ret, "q", "", "er", 8);
11392 return ret;
11393 case 10:
11394 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11395 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11396 ret, "q", "", "er", 10);
11397 if (unlikely(ret))
11398 return ret;
11399 asm("":::"memory");
11400 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11401 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11402 ret, "w", "w", "ir", 2);
11403 return ret;
11404 case 16:
11405 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11406 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11407 ret, "q", "", "er", 16);
11408 if (unlikely(ret))
11409 return ret;
11410 asm("":::"memory");
11411 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11412 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11413 ret, "q", "", "er", 8);
11414 return ret;
11415 default:
11416 - return copy_user_generic((__force void *)dst, src, size);
11417 +
11418 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11419 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11420 + dst += PAX_USER_SHADOW_BASE;
11421 +#endif
11422 +
11423 + return copy_user_generic((__force_kernel void *)dst, src, size);
11424 }
11425 }
11426
11427 static __always_inline __must_check
11428 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11429 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11430 {
11431 - int ret = 0;
11432 + unsigned ret = 0;
11433
11434 might_fault();
11435 - if (!__builtin_constant_p(size))
11436 - return copy_user_generic((__force void *)dst,
11437 - (__force void *)src, size);
11438 +
11439 + if (size > INT_MAX)
11440 + return size;
11441 +
11442 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11443 + if (!__access_ok(VERIFY_READ, src, size))
11444 + return size;
11445 + if (!__access_ok(VERIFY_WRITE, dst, size))
11446 + return size;
11447 +#endif
11448 +
11449 + if (!__builtin_constant_p(size)) {
11450 +
11451 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11452 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11453 + src += PAX_USER_SHADOW_BASE;
11454 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11455 + dst += PAX_USER_SHADOW_BASE;
11456 +#endif
11457 +
11458 + return copy_user_generic((__force_kernel void *)dst,
11459 + (__force_kernel const void *)src, size);
11460 + }
11461 switch (size) {
11462 case 1: {
11463 u8 tmp;
11464 - __get_user_asm(tmp, (u8 __user *)src,
11465 + __get_user_asm(tmp, (const u8 __user *)src,
11466 ret, "b", "b", "=q", 1);
11467 if (likely(!ret))
11468 __put_user_asm(tmp, (u8 __user *)dst,
11469 @@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11470 }
11471 case 2: {
11472 u16 tmp;
11473 - __get_user_asm(tmp, (u16 __user *)src,
11474 + __get_user_asm(tmp, (const u16 __user *)src,
11475 ret, "w", "w", "=r", 2);
11476 if (likely(!ret))
11477 __put_user_asm(tmp, (u16 __user *)dst,
11478 @@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11479
11480 case 4: {
11481 u32 tmp;
11482 - __get_user_asm(tmp, (u32 __user *)src,
11483 + __get_user_asm(tmp, (const u32 __user *)src,
11484 ret, "l", "k", "=r", 4);
11485 if (likely(!ret))
11486 __put_user_asm(tmp, (u32 __user *)dst,
11487 @@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11488 }
11489 case 8: {
11490 u64 tmp;
11491 - __get_user_asm(tmp, (u64 __user *)src,
11492 + __get_user_asm(tmp, (const u64 __user *)src,
11493 ret, "q", "", "=r", 8);
11494 if (likely(!ret))
11495 __put_user_asm(tmp, (u64 __user *)dst,
11496 @@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11497 return ret;
11498 }
11499 default:
11500 - return copy_user_generic((__force void *)dst,
11501 - (__force void *)src, size);
11502 +
11503 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11504 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11505 + src += PAX_USER_SHADOW_BASE;
11506 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11507 + dst += PAX_USER_SHADOW_BASE;
11508 +#endif
11509 +
11510 + return copy_user_generic((__force_kernel void *)dst,
11511 + (__force_kernel const void *)src, size);
11512 }
11513 }
11514
11515 @@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11516 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11517
11518 static __must_check __always_inline int
11519 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11520 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11521 {
11522 - return copy_user_generic(dst, (__force const void *)src, size);
11523 + if (size > INT_MAX)
11524 + return size;
11525 +
11526 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11527 + if (!__access_ok(VERIFY_READ, src, size))
11528 + return size;
11529 +
11530 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11531 + src += PAX_USER_SHADOW_BASE;
11532 +#endif
11533 +
11534 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11535 }
11536
11537 -static __must_check __always_inline int
11538 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11539 +static __must_check __always_inline unsigned long
11540 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11541 {
11542 - return copy_user_generic((__force void *)dst, src, size);
11543 + if (size > INT_MAX)
11544 + return size;
11545 +
11546 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11547 + if (!__access_ok(VERIFY_WRITE, dst, size))
11548 + return size;
11549 +
11550 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11551 + dst += PAX_USER_SHADOW_BASE;
11552 +#endif
11553 +
11554 + return copy_user_generic((__force_kernel void *)dst, src, size);
11555 }
11556
11557 -extern long __copy_user_nocache(void *dst, const void __user *src,
11558 - unsigned size, int zerorest);
11559 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11560 + unsigned long size, int zerorest);
11561
11562 -static inline int
11563 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11564 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11565 {
11566 might_sleep();
11567 +
11568 + if (size > INT_MAX)
11569 + return size;
11570 +
11571 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11572 + if (!__access_ok(VERIFY_READ, src, size))
11573 + return size;
11574 +#endif
11575 +
11576 return __copy_user_nocache(dst, src, size, 1);
11577 }
11578
11579 -static inline int
11580 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11581 - unsigned size)
11582 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11583 + unsigned long size)
11584 {
11585 + if (size > INT_MAX)
11586 + return size;
11587 +
11588 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11589 + if (!__access_ok(VERIFY_READ, src, size))
11590 + return size;
11591 +#endif
11592 +
11593 return __copy_user_nocache(dst, src, size, 0);
11594 }
11595
11596 -unsigned long
11597 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11598 +extern unsigned long
11599 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11600
11601 #endif /* _ASM_X86_UACCESS_64_H */
11602 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11603 index bb05228..d763d5b 100644
11604 --- a/arch/x86/include/asm/vdso.h
11605 +++ b/arch/x86/include/asm/vdso.h
11606 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11607 #define VDSO32_SYMBOL(base, name) \
11608 ({ \
11609 extern const char VDSO32_##name[]; \
11610 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11611 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11612 })
11613 #endif
11614
11615 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11616 index 1971e65..1e3559b 100644
11617 --- a/arch/x86/include/asm/x86_init.h
11618 +++ b/arch/x86/include/asm/x86_init.h
11619 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11620 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11621 void (*find_smp_config)(void);
11622 void (*get_smp_config)(unsigned int early);
11623 -};
11624 +} __no_const;
11625
11626 /**
11627 * struct x86_init_resources - platform specific resource related ops
11628 @@ -42,7 +42,7 @@ struct x86_init_resources {
11629 void (*probe_roms)(void);
11630 void (*reserve_resources)(void);
11631 char *(*memory_setup)(void);
11632 -};
11633 +} __no_const;
11634
11635 /**
11636 * struct x86_init_irqs - platform specific interrupt setup
11637 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11638 void (*pre_vector_init)(void);
11639 void (*intr_init)(void);
11640 void (*trap_init)(void);
11641 -};
11642 +} __no_const;
11643
11644 /**
11645 * struct x86_init_oem - oem platform specific customizing functions
11646 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11647 struct x86_init_oem {
11648 void (*arch_setup)(void);
11649 void (*banner)(void);
11650 -};
11651 +} __no_const;
11652
11653 /**
11654 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11655 @@ -76,7 +76,7 @@ struct x86_init_oem {
11656 */
11657 struct x86_init_mapping {
11658 void (*pagetable_reserve)(u64 start, u64 end);
11659 -};
11660 +} __no_const;
11661
11662 /**
11663 * struct x86_init_paging - platform specific paging functions
11664 @@ -86,7 +86,7 @@ struct x86_init_mapping {
11665 struct x86_init_paging {
11666 void (*pagetable_setup_start)(pgd_t *base);
11667 void (*pagetable_setup_done)(pgd_t *base);
11668 -};
11669 +} __no_const;
11670
11671 /**
11672 * struct x86_init_timers - platform specific timer setup
11673 @@ -101,7 +101,7 @@ struct x86_init_timers {
11674 void (*tsc_pre_init)(void);
11675 void (*timer_init)(void);
11676 void (*wallclock_init)(void);
11677 -};
11678 +} __no_const;
11679
11680 /**
11681 * struct x86_init_iommu - platform specific iommu setup
11682 @@ -109,7 +109,7 @@ struct x86_init_timers {
11683 */
11684 struct x86_init_iommu {
11685 int (*iommu_init)(void);
11686 -};
11687 +} __no_const;
11688
11689 /**
11690 * struct x86_init_pci - platform specific pci init functions
11691 @@ -123,7 +123,7 @@ struct x86_init_pci {
11692 int (*init)(void);
11693 void (*init_irq)(void);
11694 void (*fixup_irqs)(void);
11695 -};
11696 +} __no_const;
11697
11698 /**
11699 * struct x86_init_ops - functions for platform specific setup
11700 @@ -139,7 +139,7 @@ struct x86_init_ops {
11701 struct x86_init_timers timers;
11702 struct x86_init_iommu iommu;
11703 struct x86_init_pci pci;
11704 -};
11705 +} __no_const;
11706
11707 /**
11708 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11709 @@ -147,7 +147,7 @@ struct x86_init_ops {
11710 */
11711 struct x86_cpuinit_ops {
11712 void (*setup_percpu_clockev)(void);
11713 -};
11714 +} __no_const;
11715
11716 /**
11717 * struct x86_platform_ops - platform specific runtime functions
11718 @@ -169,7 +169,7 @@ struct x86_platform_ops {
11719 void (*nmi_init)(void);
11720 unsigned char (*get_nmi_reason)(void);
11721 int (*i8042_detect)(void);
11722 -};
11723 +} __no_const;
11724
11725 struct pci_dev;
11726
11727 @@ -177,7 +177,7 @@ struct x86_msi_ops {
11728 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11729 void (*teardown_msi_irq)(unsigned int irq);
11730 void (*teardown_msi_irqs)(struct pci_dev *dev);
11731 -};
11732 +} __no_const;
11733
11734 extern struct x86_init_ops x86_init;
11735 extern struct x86_cpuinit_ops x86_cpuinit;
11736 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11737 index c6ce245..ffbdab7 100644
11738 --- a/arch/x86/include/asm/xsave.h
11739 +++ b/arch/x86/include/asm/xsave.h
11740 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11741 {
11742 int err;
11743
11744 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11745 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11746 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11747 +#endif
11748 +
11749 /*
11750 * Clear the xsave header first, so that reserved fields are
11751 * initialized to zero.
11752 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11753 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11754 {
11755 int err;
11756 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11757 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11758 u32 lmask = mask;
11759 u32 hmask = mask >> 32;
11760
11761 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11762 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11763 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11764 +#endif
11765 +
11766 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11767 "2:\n"
11768 ".section .fixup,\"ax\"\n"
11769 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11770 index 6a564ac..9b1340c 100644
11771 --- a/arch/x86/kernel/acpi/realmode/Makefile
11772 +++ b/arch/x86/kernel/acpi/realmode/Makefile
11773 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11774 $(call cc-option, -fno-stack-protector) \
11775 $(call cc-option, -mpreferred-stack-boundary=2)
11776 KBUILD_CFLAGS += $(call cc-option, -m32)
11777 +ifdef CONSTIFY_PLUGIN
11778 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11779 +endif
11780 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11781 GCOV_PROFILE := n
11782
11783 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11784 index b4fd836..4358fe3 100644
11785 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
11786 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11787 @@ -108,6 +108,9 @@ wakeup_code:
11788 /* Do any other stuff... */
11789
11790 #ifndef CONFIG_64BIT
11791 + /* Recheck NX bit overrides (64bit path does this in trampoline */
11792 + call verify_cpu
11793 +
11794 /* This could also be done in C code... */
11795 movl pmode_cr3, %eax
11796 movl %eax, %cr3
11797 @@ -131,6 +134,7 @@ wakeup_code:
11798 movl pmode_cr0, %eax
11799 movl %eax, %cr0
11800 jmp pmode_return
11801 +# include "../../verify_cpu.S"
11802 #else
11803 pushw $0
11804 pushw trampoline_segment
11805 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11806 index 103b6ab..2004d0a 100644
11807 --- a/arch/x86/kernel/acpi/sleep.c
11808 +++ b/arch/x86/kernel/acpi/sleep.c
11809 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11810 header->trampoline_segment = trampoline_address() >> 4;
11811 #ifdef CONFIG_SMP
11812 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11813 +
11814 + pax_open_kernel();
11815 early_gdt_descr.address =
11816 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11817 + pax_close_kernel();
11818 +
11819 initial_gs = per_cpu_offset(smp_processor_id());
11820 #endif
11821 initial_code = (unsigned long)wakeup_long64;
11822 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11823 index 13ab720..95d5442 100644
11824 --- a/arch/x86/kernel/acpi/wakeup_32.S
11825 +++ b/arch/x86/kernel/acpi/wakeup_32.S
11826 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11827 # and restore the stack ... but you need gdt for this to work
11828 movl saved_context_esp, %esp
11829
11830 - movl %cs:saved_magic, %eax
11831 - cmpl $0x12345678, %eax
11832 + cmpl $0x12345678, saved_magic
11833 jne bogus_magic
11834
11835 # jump to place where we left off
11836 - movl saved_eip, %eax
11837 - jmp *%eax
11838 + jmp *(saved_eip)
11839
11840 bogus_magic:
11841 jmp bogus_magic
11842 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11843 index 1f84794..e23f862 100644
11844 --- a/arch/x86/kernel/alternative.c
11845 +++ b/arch/x86/kernel/alternative.c
11846 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11847 */
11848 for (a = start; a < end; a++) {
11849 instr = (u8 *)&a->instr_offset + a->instr_offset;
11850 +
11851 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11852 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11853 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11854 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11855 +#endif
11856 +
11857 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11858 BUG_ON(a->replacementlen > a->instrlen);
11859 BUG_ON(a->instrlen > sizeof(insnbuf));
11860 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11861 for (poff = start; poff < end; poff++) {
11862 u8 *ptr = (u8 *)poff + *poff;
11863
11864 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11865 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11866 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11867 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11868 +#endif
11869 +
11870 if (!*poff || ptr < text || ptr >= text_end)
11871 continue;
11872 /* turn DS segment override prefix into lock prefix */
11873 - if (*ptr == 0x3e)
11874 + if (*ktla_ktva(ptr) == 0x3e)
11875 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11876 };
11877 mutex_unlock(&text_mutex);
11878 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11879 for (poff = start; poff < end; poff++) {
11880 u8 *ptr = (u8 *)poff + *poff;
11881
11882 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11883 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11884 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11885 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11886 +#endif
11887 +
11888 if (!*poff || ptr < text || ptr >= text_end)
11889 continue;
11890 /* turn lock prefix into DS segment override prefix */
11891 - if (*ptr == 0xf0)
11892 + if (*ktla_ktva(ptr) == 0xf0)
11893 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11894 };
11895 mutex_unlock(&text_mutex);
11896 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11897
11898 BUG_ON(p->len > MAX_PATCH_LEN);
11899 /* prep the buffer with the original instructions */
11900 - memcpy(insnbuf, p->instr, p->len);
11901 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11902 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11903 (unsigned long)p->instr, p->len);
11904
11905 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11906 if (smp_alt_once)
11907 free_init_pages("SMP alternatives",
11908 (unsigned long)__smp_locks,
11909 - (unsigned long)__smp_locks_end);
11910 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11911
11912 restart_nmi();
11913 }
11914 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11915 * instructions. And on the local CPU you need to be protected again NMI or MCE
11916 * handlers seeing an inconsistent instruction while you patch.
11917 */
11918 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
11919 +void *__kprobes text_poke_early(void *addr, const void *opcode,
11920 size_t len)
11921 {
11922 unsigned long flags;
11923 local_irq_save(flags);
11924 - memcpy(addr, opcode, len);
11925 +
11926 + pax_open_kernel();
11927 + memcpy(ktla_ktva(addr), opcode, len);
11928 sync_core();
11929 + pax_close_kernel();
11930 +
11931 local_irq_restore(flags);
11932 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11933 that causes hangs on some VIA CPUs. */
11934 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11935 */
11936 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11937 {
11938 - unsigned long flags;
11939 - char *vaddr;
11940 + unsigned char *vaddr = ktla_ktva(addr);
11941 struct page *pages[2];
11942 - int i;
11943 + size_t i;
11944
11945 if (!core_kernel_text((unsigned long)addr)) {
11946 - pages[0] = vmalloc_to_page(addr);
11947 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11948 + pages[0] = vmalloc_to_page(vaddr);
11949 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11950 } else {
11951 - pages[0] = virt_to_page(addr);
11952 + pages[0] = virt_to_page(vaddr);
11953 WARN_ON(!PageReserved(pages[0]));
11954 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11955 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11956 }
11957 BUG_ON(!pages[0]);
11958 - local_irq_save(flags);
11959 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11960 - if (pages[1])
11961 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11962 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11963 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11964 - clear_fixmap(FIX_TEXT_POKE0);
11965 - if (pages[1])
11966 - clear_fixmap(FIX_TEXT_POKE1);
11967 - local_flush_tlb();
11968 - sync_core();
11969 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11970 - that causes hangs on some VIA CPUs. */
11971 + text_poke_early(addr, opcode, len);
11972 for (i = 0; i < len; i++)
11973 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11974 - local_irq_restore(flags);
11975 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11976 return addr;
11977 }
11978
11979 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11980 index f98d84c..e402a69 100644
11981 --- a/arch/x86/kernel/apic/apic.c
11982 +++ b/arch/x86/kernel/apic/apic.c
11983 @@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11984 /*
11985 * Debug level, exported for io_apic.c
11986 */
11987 -unsigned int apic_verbosity;
11988 +int apic_verbosity;
11989
11990 int pic_mode;
11991
11992 @@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11993 apic_write(APIC_ESR, 0);
11994 v1 = apic_read(APIC_ESR);
11995 ack_APIC_irq();
11996 - atomic_inc(&irq_err_count);
11997 + atomic_inc_unchecked(&irq_err_count);
11998
11999 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
12000 smp_processor_id(), v0 , v1);
12001 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
12002 index 6d939d7..0697fcc 100644
12003 --- a/arch/x86/kernel/apic/io_apic.c
12004 +++ b/arch/x86/kernel/apic/io_apic.c
12005 @@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
12006 }
12007 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
12008
12009 -void lock_vector_lock(void)
12010 +void lock_vector_lock(void) __acquires(vector_lock)
12011 {
12012 /* Used to the online set of cpus does not change
12013 * during assign_irq_vector.
12014 @@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
12015 raw_spin_lock(&vector_lock);
12016 }
12017
12018 -void unlock_vector_lock(void)
12019 +void unlock_vector_lock(void) __releases(vector_lock)
12020 {
12021 raw_spin_unlock(&vector_lock);
12022 }
12023 @@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
12024 ack_APIC_irq();
12025 }
12026
12027 -atomic_t irq_mis_count;
12028 +atomic_unchecked_t irq_mis_count;
12029
12030 static void ack_apic_level(struct irq_data *data)
12031 {
12032 @@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
12033 * at the cpu.
12034 */
12035 if (!(v & (1 << (i & 0x1f)))) {
12036 - atomic_inc(&irq_mis_count);
12037 + atomic_inc_unchecked(&irq_mis_count);
12038
12039 eoi_ioapic_irq(irq, cfg);
12040 }
12041 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
12042 index a46bd38..6b906d7 100644
12043 --- a/arch/x86/kernel/apm_32.c
12044 +++ b/arch/x86/kernel/apm_32.c
12045 @@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
12046 * This is for buggy BIOS's that refer to (real mode) segment 0x40
12047 * even though they are called in protected mode.
12048 */
12049 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
12050 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
12051 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
12052
12053 static const char driver_version[] = "1.16ac"; /* no spaces */
12054 @@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
12055 BUG_ON(cpu != 0);
12056 gdt = get_cpu_gdt_table(cpu);
12057 save_desc_40 = gdt[0x40 / 8];
12058 +
12059 + pax_open_kernel();
12060 gdt[0x40 / 8] = bad_bios_desc;
12061 + pax_close_kernel();
12062
12063 apm_irq_save(flags);
12064 APM_DO_SAVE_SEGS;
12065 @@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
12066 &call->esi);
12067 APM_DO_RESTORE_SEGS;
12068 apm_irq_restore(flags);
12069 +
12070 + pax_open_kernel();
12071 gdt[0x40 / 8] = save_desc_40;
12072 + pax_close_kernel();
12073 +
12074 put_cpu();
12075
12076 return call->eax & 0xff;
12077 @@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
12078 BUG_ON(cpu != 0);
12079 gdt = get_cpu_gdt_table(cpu);
12080 save_desc_40 = gdt[0x40 / 8];
12081 +
12082 + pax_open_kernel();
12083 gdt[0x40 / 8] = bad_bios_desc;
12084 + pax_close_kernel();
12085
12086 apm_irq_save(flags);
12087 APM_DO_SAVE_SEGS;
12088 @@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
12089 &call->eax);
12090 APM_DO_RESTORE_SEGS;
12091 apm_irq_restore(flags);
12092 +
12093 + pax_open_kernel();
12094 gdt[0x40 / 8] = save_desc_40;
12095 + pax_close_kernel();
12096 +
12097 put_cpu();
12098 return error;
12099 }
12100 @@ -2347,12 +2361,15 @@ static int __init apm_init(void)
12101 * code to that CPU.
12102 */
12103 gdt = get_cpu_gdt_table(0);
12104 +
12105 + pax_open_kernel();
12106 set_desc_base(&gdt[APM_CS >> 3],
12107 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
12108 set_desc_base(&gdt[APM_CS_16 >> 3],
12109 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
12110 set_desc_base(&gdt[APM_DS >> 3],
12111 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
12112 + pax_close_kernel();
12113
12114 proc_create("apm", 0, NULL, &apm_file_ops);
12115
12116 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
12117 index 4f13faf..87db5d2 100644
12118 --- a/arch/x86/kernel/asm-offsets.c
12119 +++ b/arch/x86/kernel/asm-offsets.c
12120 @@ -33,6 +33,8 @@ void common(void) {
12121 OFFSET(TI_status, thread_info, status);
12122 OFFSET(TI_addr_limit, thread_info, addr_limit);
12123 OFFSET(TI_preempt_count, thread_info, preempt_count);
12124 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
12125 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12126
12127 BLANK();
12128 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
12129 @@ -53,8 +55,26 @@ void common(void) {
12130 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12131 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12132 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12133 +
12134 +#ifdef CONFIG_PAX_KERNEXEC
12135 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12136 #endif
12137
12138 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12139 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12140 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12141 +#ifdef CONFIG_X86_64
12142 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
12143 +#endif
12144 +#endif
12145 +
12146 +#endif
12147 +
12148 + BLANK();
12149 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12150 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
12151 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12152 +
12153 #ifdef CONFIG_XEN
12154 BLANK();
12155 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12156 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
12157 index e72a119..6e2955d 100644
12158 --- a/arch/x86/kernel/asm-offsets_64.c
12159 +++ b/arch/x86/kernel/asm-offsets_64.c
12160 @@ -69,6 +69,7 @@ int main(void)
12161 BLANK();
12162 #undef ENTRY
12163
12164 + DEFINE(TSS_size, sizeof(struct tss_struct));
12165 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
12166 BLANK();
12167
12168 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
12169 index 25f24dc..4094a7f 100644
12170 --- a/arch/x86/kernel/cpu/Makefile
12171 +++ b/arch/x86/kernel/cpu/Makefile
12172 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
12173 CFLAGS_REMOVE_perf_event.o = -pg
12174 endif
12175
12176 -# Make sure load_percpu_segment has no stackprotector
12177 -nostackp := $(call cc-option, -fno-stack-protector)
12178 -CFLAGS_common.o := $(nostackp)
12179 -
12180 obj-y := intel_cacheinfo.o scattered.o topology.o
12181 obj-y += proc.o capflags.o powerflags.o common.o
12182 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
12183 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
12184 index 0bab2b1..d0a1bf8 100644
12185 --- a/arch/x86/kernel/cpu/amd.c
12186 +++ b/arch/x86/kernel/cpu/amd.c
12187 @@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
12188 unsigned int size)
12189 {
12190 /* AMD errata T13 (order #21922) */
12191 - if ((c->x86 == 6)) {
12192 + if (c->x86 == 6) {
12193 /* Duron Rev A0 */
12194 if (c->x86_model == 3 && c->x86_mask == 0)
12195 size = 64;
12196 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
12197 index aa003b1..47ea638 100644
12198 --- a/arch/x86/kernel/cpu/common.c
12199 +++ b/arch/x86/kernel/cpu/common.c
12200 @@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
12201
12202 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12203
12204 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12205 -#ifdef CONFIG_X86_64
12206 - /*
12207 - * We need valid kernel segments for data and code in long mode too
12208 - * IRET will check the segment types kkeil 2000/10/28
12209 - * Also sysret mandates a special GDT layout
12210 - *
12211 - * TLS descriptors are currently at a different place compared to i386.
12212 - * Hopefully nobody expects them at a fixed place (Wine?)
12213 - */
12214 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12215 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12216 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12217 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12218 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12219 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12220 -#else
12221 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12222 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12223 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12224 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12225 - /*
12226 - * Segments used for calling PnP BIOS have byte granularity.
12227 - * They code segments and data segments have fixed 64k limits,
12228 - * the transfer segment sizes are set at run time.
12229 - */
12230 - /* 32-bit code */
12231 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12232 - /* 16-bit code */
12233 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12234 - /* 16-bit data */
12235 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12236 - /* 16-bit data */
12237 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12238 - /* 16-bit data */
12239 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12240 - /*
12241 - * The APM segments have byte granularity and their bases
12242 - * are set at run time. All have 64k limits.
12243 - */
12244 - /* 32-bit code */
12245 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12246 - /* 16-bit code */
12247 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12248 - /* data */
12249 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12250 -
12251 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12252 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12253 - GDT_STACK_CANARY_INIT
12254 -#endif
12255 -} };
12256 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12257 -
12258 static int __init x86_xsave_setup(char *s)
12259 {
12260 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12261 @@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
12262 {
12263 struct desc_ptr gdt_descr;
12264
12265 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12266 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12267 gdt_descr.size = GDT_SIZE - 1;
12268 load_gdt(&gdt_descr);
12269 /* Reload the per-cpu base */
12270 @@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
12271 /* Filter out anything that depends on CPUID levels we don't have */
12272 filter_cpuid_features(c, true);
12273
12274 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
12275 + setup_clear_cpu_cap(X86_FEATURE_SEP);
12276 +#endif
12277 +
12278 /* If the model name is still unset, do table lookup. */
12279 if (!c->x86_model_id[0]) {
12280 const char *p;
12281 @@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
12282 }
12283 __setup("clearcpuid=", setup_disablecpuid);
12284
12285 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12286 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
12287 +
12288 #ifdef CONFIG_X86_64
12289 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12290
12291 @@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
12292 EXPORT_PER_CPU_SYMBOL(current_task);
12293
12294 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12295 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12296 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12297 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12298
12299 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12300 @@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
12301 {
12302 memset(regs, 0, sizeof(struct pt_regs));
12303 regs->fs = __KERNEL_PERCPU;
12304 - regs->gs = __KERNEL_STACK_CANARY;
12305 + savesegment(gs, regs->gs);
12306
12307 return regs;
12308 }
12309 @@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
12310 int i;
12311
12312 cpu = stack_smp_processor_id();
12313 - t = &per_cpu(init_tss, cpu);
12314 + t = init_tss + cpu;
12315 oist = &per_cpu(orig_ist, cpu);
12316
12317 #ifdef CONFIG_NUMA
12318 @@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
12319 switch_to_new_gdt(cpu);
12320 loadsegment(fs, 0);
12321
12322 - load_idt((const struct desc_ptr *)&idt_descr);
12323 + load_idt(&idt_descr);
12324
12325 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12326 syscall_init();
12327 @@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
12328 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12329 barrier();
12330
12331 - x86_configure_nx();
12332 if (cpu != 0)
12333 enable_x2apic();
12334
12335 @@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
12336 {
12337 int cpu = smp_processor_id();
12338 struct task_struct *curr = current;
12339 - struct tss_struct *t = &per_cpu(init_tss, cpu);
12340 + struct tss_struct *t = init_tss + cpu;
12341 struct thread_struct *thread = &curr->thread;
12342
12343 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12344 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12345 index 5231312..a78a987 100644
12346 --- a/arch/x86/kernel/cpu/intel.c
12347 +++ b/arch/x86/kernel/cpu/intel.c
12348 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12349 * Update the IDT descriptor and reload the IDT so that
12350 * it uses the read-only mapped virtual address.
12351 */
12352 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12353 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12354 load_idt(&idt_descr);
12355 }
12356 #endif
12357 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12358 index 2af127d..8ff7ac0 100644
12359 --- a/arch/x86/kernel/cpu/mcheck/mce.c
12360 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
12361 @@ -42,6 +42,7 @@
12362 #include <asm/processor.h>
12363 #include <asm/mce.h>
12364 #include <asm/msr.h>
12365 +#include <asm/local.h>
12366
12367 #include "mce-internal.h"
12368
12369 @@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
12370 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12371 m->cs, m->ip);
12372
12373 - if (m->cs == __KERNEL_CS)
12374 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12375 print_symbol("{%s}", m->ip);
12376 pr_cont("\n");
12377 }
12378 @@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
12379
12380 #define PANIC_TIMEOUT 5 /* 5 seconds */
12381
12382 -static atomic_t mce_paniced;
12383 +static atomic_unchecked_t mce_paniced;
12384
12385 static int fake_panic;
12386 -static atomic_t mce_fake_paniced;
12387 +static atomic_unchecked_t mce_fake_paniced;
12388
12389 /* Panic in progress. Enable interrupts and wait for final IPI */
12390 static void wait_for_panic(void)
12391 @@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12392 /*
12393 * Make sure only one CPU runs in machine check panic
12394 */
12395 - if (atomic_inc_return(&mce_paniced) > 1)
12396 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12397 wait_for_panic();
12398 barrier();
12399
12400 @@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12401 console_verbose();
12402 } else {
12403 /* Don't log too much for fake panic */
12404 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12405 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12406 return;
12407 }
12408 /* First print corrected ones that are still unlogged */
12409 @@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12410 * might have been modified by someone else.
12411 */
12412 rmb();
12413 - if (atomic_read(&mce_paniced))
12414 + if (atomic_read_unchecked(&mce_paniced))
12415 wait_for_panic();
12416 if (!monarch_timeout)
12417 goto out;
12418 @@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12419 }
12420
12421 /* Call the installed machine check handler for this CPU setup. */
12422 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
12423 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12424 unexpected_machine_check;
12425
12426 /*
12427 @@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12428 return;
12429 }
12430
12431 + pax_open_kernel();
12432 machine_check_vector = do_machine_check;
12433 + pax_close_kernel();
12434
12435 __mcheck_cpu_init_generic();
12436 __mcheck_cpu_init_vendor(c);
12437 @@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12438 */
12439
12440 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12441 -static int mce_chrdev_open_count; /* #times opened */
12442 +static local_t mce_chrdev_open_count; /* #times opened */
12443 static int mce_chrdev_open_exclu; /* already open exclusive? */
12444
12445 static int mce_chrdev_open(struct inode *inode, struct file *file)
12446 @@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12447 spin_lock(&mce_chrdev_state_lock);
12448
12449 if (mce_chrdev_open_exclu ||
12450 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12451 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12452 spin_unlock(&mce_chrdev_state_lock);
12453
12454 return -EBUSY;
12455 @@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12456
12457 if (file->f_flags & O_EXCL)
12458 mce_chrdev_open_exclu = 1;
12459 - mce_chrdev_open_count++;
12460 + local_inc(&mce_chrdev_open_count);
12461
12462 spin_unlock(&mce_chrdev_state_lock);
12463
12464 @@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12465 {
12466 spin_lock(&mce_chrdev_state_lock);
12467
12468 - mce_chrdev_open_count--;
12469 + local_dec(&mce_chrdev_open_count);
12470 mce_chrdev_open_exclu = 0;
12471
12472 spin_unlock(&mce_chrdev_state_lock);
12473 @@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
12474 static void mce_reset(void)
12475 {
12476 cpu_missing = 0;
12477 - atomic_set(&mce_fake_paniced, 0);
12478 + atomic_set_unchecked(&mce_fake_paniced, 0);
12479 atomic_set(&mce_executing, 0);
12480 atomic_set(&mce_callin, 0);
12481 atomic_set(&global_nwo, 0);
12482 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12483 index 5c0e653..0882b0a 100644
12484 --- a/arch/x86/kernel/cpu/mcheck/p5.c
12485 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
12486 @@ -12,6 +12,7 @@
12487 #include <asm/system.h>
12488 #include <asm/mce.h>
12489 #include <asm/msr.h>
12490 +#include <asm/pgtable.h>
12491
12492 /* By default disabled */
12493 int mce_p5_enabled __read_mostly;
12494 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12495 if (!cpu_has(c, X86_FEATURE_MCE))
12496 return;
12497
12498 + pax_open_kernel();
12499 machine_check_vector = pentium_machine_check;
12500 + pax_close_kernel();
12501 /* Make sure the vector pointer is visible before we enable MCEs: */
12502 wmb();
12503
12504 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12505 index 54060f5..c1a7577 100644
12506 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
12507 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12508 @@ -11,6 +11,7 @@
12509 #include <asm/system.h>
12510 #include <asm/mce.h>
12511 #include <asm/msr.h>
12512 +#include <asm/pgtable.h>
12513
12514 /* Machine check handler for WinChip C6: */
12515 static void winchip_machine_check(struct pt_regs *regs, long error_code)
12516 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12517 {
12518 u32 lo, hi;
12519
12520 + pax_open_kernel();
12521 machine_check_vector = winchip_machine_check;
12522 + pax_close_kernel();
12523 /* Make sure the vector pointer is visible before we enable MCEs: */
12524 wmb();
12525
12526 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12527 index 6b96110..0da73eb 100644
12528 --- a/arch/x86/kernel/cpu/mtrr/main.c
12529 +++ b/arch/x86/kernel/cpu/mtrr/main.c
12530 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12531 u64 size_or_mask, size_and_mask;
12532 static bool mtrr_aps_delayed_init;
12533
12534 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12535 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12536
12537 const struct mtrr_ops *mtrr_if;
12538
12539 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12540 index df5e41f..816c719 100644
12541 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12542 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12543 @@ -25,7 +25,7 @@ struct mtrr_ops {
12544 int (*validate_add_page)(unsigned long base, unsigned long size,
12545 unsigned int type);
12546 int (*have_wrcomb)(void);
12547 -};
12548 +} __do_const;
12549
12550 extern int generic_get_free_region(unsigned long base, unsigned long size,
12551 int replace_reg);
12552 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12553 index 2bda212..78cc605 100644
12554 --- a/arch/x86/kernel/cpu/perf_event.c
12555 +++ b/arch/x86/kernel/cpu/perf_event.c
12556 @@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12557 break;
12558
12559 perf_callchain_store(entry, frame.return_address);
12560 - fp = frame.next_frame;
12561 + fp = (const void __force_user *)frame.next_frame;
12562 }
12563 }
12564
12565 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12566 index 13ad899..f642b9a 100644
12567 --- a/arch/x86/kernel/crash.c
12568 +++ b/arch/x86/kernel/crash.c
12569 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
12570 {
12571 #ifdef CONFIG_X86_32
12572 struct pt_regs fixed_regs;
12573 -#endif
12574
12575 -#ifdef CONFIG_X86_32
12576 - if (!user_mode_vm(regs)) {
12577 + if (!user_mode(regs)) {
12578 crash_fixup_ss_esp(&fixed_regs, regs);
12579 regs = &fixed_regs;
12580 }
12581 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12582 index 37250fe..bf2ec74 100644
12583 --- a/arch/x86/kernel/doublefault_32.c
12584 +++ b/arch/x86/kernel/doublefault_32.c
12585 @@ -11,7 +11,7 @@
12586
12587 #define DOUBLEFAULT_STACKSIZE (1024)
12588 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12589 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12590 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12591
12592 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12593
12594 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12595 unsigned long gdt, tss;
12596
12597 store_gdt(&gdt_desc);
12598 - gdt = gdt_desc.address;
12599 + gdt = (unsigned long)gdt_desc.address;
12600
12601 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12602
12603 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12604 /* 0x2 bit is always set */
12605 .flags = X86_EFLAGS_SF | 0x2,
12606 .sp = STACK_START,
12607 - .es = __USER_DS,
12608 + .es = __KERNEL_DS,
12609 .cs = __KERNEL_CS,
12610 .ss = __KERNEL_DS,
12611 - .ds = __USER_DS,
12612 + .ds = __KERNEL_DS,
12613 .fs = __KERNEL_PERCPU,
12614
12615 .__cr3 = __pa_nodebug(swapper_pg_dir),
12616 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12617 index 1aae78f..aab3a3d 100644
12618 --- a/arch/x86/kernel/dumpstack.c
12619 +++ b/arch/x86/kernel/dumpstack.c
12620 @@ -2,6 +2,9 @@
12621 * Copyright (C) 1991, 1992 Linus Torvalds
12622 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12623 */
12624 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12625 +#define __INCLUDED_BY_HIDESYM 1
12626 +#endif
12627 #include <linux/kallsyms.h>
12628 #include <linux/kprobes.h>
12629 #include <linux/uaccess.h>
12630 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12631 static void
12632 print_ftrace_graph_addr(unsigned long addr, void *data,
12633 const struct stacktrace_ops *ops,
12634 - struct thread_info *tinfo, int *graph)
12635 + struct task_struct *task, int *graph)
12636 {
12637 - struct task_struct *task = tinfo->task;
12638 unsigned long ret_addr;
12639 int index = task->curr_ret_stack;
12640
12641 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12642 static inline void
12643 print_ftrace_graph_addr(unsigned long addr, void *data,
12644 const struct stacktrace_ops *ops,
12645 - struct thread_info *tinfo, int *graph)
12646 + struct task_struct *task, int *graph)
12647 { }
12648 #endif
12649
12650 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12651 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12652 */
12653
12654 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12655 - void *p, unsigned int size, void *end)
12656 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12657 {
12658 - void *t = tinfo;
12659 if (end) {
12660 if (p < end && p >= (end-THREAD_SIZE))
12661 return 1;
12662 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12663 }
12664
12665 unsigned long
12666 -print_context_stack(struct thread_info *tinfo,
12667 +print_context_stack(struct task_struct *task, void *stack_start,
12668 unsigned long *stack, unsigned long bp,
12669 const struct stacktrace_ops *ops, void *data,
12670 unsigned long *end, int *graph)
12671 {
12672 struct stack_frame *frame = (struct stack_frame *)bp;
12673
12674 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12675 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12676 unsigned long addr;
12677
12678 addr = *stack;
12679 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12680 } else {
12681 ops->address(data, addr, 0);
12682 }
12683 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12684 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12685 }
12686 stack++;
12687 }
12688 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12689 EXPORT_SYMBOL_GPL(print_context_stack);
12690
12691 unsigned long
12692 -print_context_stack_bp(struct thread_info *tinfo,
12693 +print_context_stack_bp(struct task_struct *task, void *stack_start,
12694 unsigned long *stack, unsigned long bp,
12695 const struct stacktrace_ops *ops, void *data,
12696 unsigned long *end, int *graph)
12697 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12698 struct stack_frame *frame = (struct stack_frame *)bp;
12699 unsigned long *ret_addr = &frame->return_address;
12700
12701 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12702 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12703 unsigned long addr = *ret_addr;
12704
12705 if (!__kernel_text_address(addr))
12706 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12707 ops->address(data, addr, 1);
12708 frame = frame->next_frame;
12709 ret_addr = &frame->return_address;
12710 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12711 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12712 }
12713
12714 return (unsigned long)frame;
12715 @@ -186,7 +186,7 @@ void dump_stack(void)
12716
12717 bp = stack_frame(current, NULL);
12718 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12719 - current->pid, current->comm, print_tainted(),
12720 + task_pid_nr(current), current->comm, print_tainted(),
12721 init_utsname()->release,
12722 (int)strcspn(init_utsname()->version, " "),
12723 init_utsname()->version);
12724 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12725 }
12726 EXPORT_SYMBOL_GPL(oops_begin);
12727
12728 +extern void gr_handle_kernel_exploit(void);
12729 +
12730 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12731 {
12732 if (regs && kexec_should_crash(current))
12733 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12734 panic("Fatal exception in interrupt");
12735 if (panic_on_oops)
12736 panic("Fatal exception");
12737 - do_exit(signr);
12738 +
12739 + gr_handle_kernel_exploit();
12740 +
12741 + do_group_exit(signr);
12742 }
12743
12744 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12745 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12746
12747 show_registers(regs);
12748 #ifdef CONFIG_X86_32
12749 - if (user_mode_vm(regs)) {
12750 + if (user_mode(regs)) {
12751 sp = regs->sp;
12752 ss = regs->ss & 0xffff;
12753 } else {
12754 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12755 unsigned long flags = oops_begin();
12756 int sig = SIGSEGV;
12757
12758 - if (!user_mode_vm(regs))
12759 + if (!user_mode(regs))
12760 report_bug(regs->ip, regs);
12761
12762 if (__die(str, regs, err))
12763 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12764 index c99f9ed..2a15d80 100644
12765 --- a/arch/x86/kernel/dumpstack_32.c
12766 +++ b/arch/x86/kernel/dumpstack_32.c
12767 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12768 bp = stack_frame(task, regs);
12769
12770 for (;;) {
12771 - struct thread_info *context;
12772 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12773
12774 - context = (struct thread_info *)
12775 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12776 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12777 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12778
12779 - stack = (unsigned long *)context->previous_esp;
12780 - if (!stack)
12781 + if (stack_start == task_stack_page(task))
12782 break;
12783 + stack = *(unsigned long **)stack_start;
12784 if (ops->stack(data, "IRQ") < 0)
12785 break;
12786 touch_nmi_watchdog();
12787 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12788 * When in-kernel, we also print out the stack and code at the
12789 * time of the fault..
12790 */
12791 - if (!user_mode_vm(regs)) {
12792 + if (!user_mode(regs)) {
12793 unsigned int code_prologue = code_bytes * 43 / 64;
12794 unsigned int code_len = code_bytes;
12795 unsigned char c;
12796 u8 *ip;
12797 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12798
12799 printk(KERN_EMERG "Stack:\n");
12800 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12801
12802 printk(KERN_EMERG "Code: ");
12803
12804 - ip = (u8 *)regs->ip - code_prologue;
12805 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12806 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12807 /* try starting at IP */
12808 - ip = (u8 *)regs->ip;
12809 + ip = (u8 *)regs->ip + cs_base;
12810 code_len = code_len - code_prologue + 1;
12811 }
12812 for (i = 0; i < code_len; i++, ip++) {
12813 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12814 printk(KERN_CONT " Bad EIP value.");
12815 break;
12816 }
12817 - if (ip == (u8 *)regs->ip)
12818 + if (ip == (u8 *)regs->ip + cs_base)
12819 printk(KERN_CONT "<%02x> ", c);
12820 else
12821 printk(KERN_CONT "%02x ", c);
12822 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12823 {
12824 unsigned short ud2;
12825
12826 + ip = ktla_ktva(ip);
12827 if (ip < PAGE_OFFSET)
12828 return 0;
12829 if (probe_kernel_address((unsigned short *)ip, ud2))
12830 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12831
12832 return ud2 == 0x0b0f;
12833 }
12834 +
12835 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12836 +void pax_check_alloca(unsigned long size)
12837 +{
12838 + unsigned long sp = (unsigned long)&sp, stack_left;
12839 +
12840 + /* all kernel stacks are of the same size */
12841 + stack_left = sp & (THREAD_SIZE - 1);
12842 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12843 +}
12844 +EXPORT_SYMBOL(pax_check_alloca);
12845 +#endif
12846 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12847 index 6d728d9..279514e 100644
12848 --- a/arch/x86/kernel/dumpstack_64.c
12849 +++ b/arch/x86/kernel/dumpstack_64.c
12850 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12851 unsigned long *irq_stack_end =
12852 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12853 unsigned used = 0;
12854 - struct thread_info *tinfo;
12855 int graph = 0;
12856 unsigned long dummy;
12857 + void *stack_start;
12858
12859 if (!task)
12860 task = current;
12861 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12862 * current stack address. If the stacks consist of nested
12863 * exceptions
12864 */
12865 - tinfo = task_thread_info(task);
12866 for (;;) {
12867 char *id;
12868 unsigned long *estack_end;
12869 +
12870 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12871 &used, &id);
12872
12873 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12874 if (ops->stack(data, id) < 0)
12875 break;
12876
12877 - bp = ops->walk_stack(tinfo, stack, bp, ops,
12878 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12879 data, estack_end, &graph);
12880 ops->stack(data, "<EOE>");
12881 /*
12882 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12883 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12884 if (ops->stack(data, "IRQ") < 0)
12885 break;
12886 - bp = ops->walk_stack(tinfo, stack, bp,
12887 + bp = ops->walk_stack(task, irq_stack, stack, bp,
12888 ops, data, irq_stack_end, &graph);
12889 /*
12890 * We link to the next stack (which would be
12891 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12892 /*
12893 * This handles the process stack:
12894 */
12895 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12896 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12897 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12898 put_cpu();
12899 }
12900 EXPORT_SYMBOL(dump_trace);
12901 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12902
12903 return ud2 == 0x0b0f;
12904 }
12905 +
12906 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12907 +void pax_check_alloca(unsigned long size)
12908 +{
12909 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12910 + unsigned cpu, used;
12911 + char *id;
12912 +
12913 + /* check the process stack first */
12914 + stack_start = (unsigned long)task_stack_page(current);
12915 + stack_end = stack_start + THREAD_SIZE;
12916 + if (likely(stack_start <= sp && sp < stack_end)) {
12917 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
12918 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12919 + return;
12920 + }
12921 +
12922 + cpu = get_cpu();
12923 +
12924 + /* check the irq stacks */
12925 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12926 + stack_start = stack_end - IRQ_STACK_SIZE;
12927 + if (stack_start <= sp && sp < stack_end) {
12928 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12929 + put_cpu();
12930 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12931 + return;
12932 + }
12933 +
12934 + /* check the exception stacks */
12935 + used = 0;
12936 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12937 + stack_start = stack_end - EXCEPTION_STKSZ;
12938 + if (stack_end && stack_start <= sp && sp < stack_end) {
12939 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12940 + put_cpu();
12941 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12942 + return;
12943 + }
12944 +
12945 + put_cpu();
12946 +
12947 + /* unknown stack */
12948 + BUG();
12949 +}
12950 +EXPORT_SYMBOL(pax_check_alloca);
12951 +#endif
12952 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12953 index cd28a35..c72ed9a 100644
12954 --- a/arch/x86/kernel/early_printk.c
12955 +++ b/arch/x86/kernel/early_printk.c
12956 @@ -7,6 +7,7 @@
12957 #include <linux/pci_regs.h>
12958 #include <linux/pci_ids.h>
12959 #include <linux/errno.h>
12960 +#include <linux/sched.h>
12961 #include <asm/io.h>
12962 #include <asm/processor.h>
12963 #include <asm/fcntl.h>
12964 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12965 index f3f6f53..0841b66 100644
12966 --- a/arch/x86/kernel/entry_32.S
12967 +++ b/arch/x86/kernel/entry_32.S
12968 @@ -186,13 +186,146 @@
12969 /*CFI_REL_OFFSET gs, PT_GS*/
12970 .endm
12971 .macro SET_KERNEL_GS reg
12972 +
12973 +#ifdef CONFIG_CC_STACKPROTECTOR
12974 movl $(__KERNEL_STACK_CANARY), \reg
12975 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12976 + movl $(__USER_DS), \reg
12977 +#else
12978 + xorl \reg, \reg
12979 +#endif
12980 +
12981 movl \reg, %gs
12982 .endm
12983
12984 #endif /* CONFIG_X86_32_LAZY_GS */
12985
12986 -.macro SAVE_ALL
12987 +.macro pax_enter_kernel
12988 +#ifdef CONFIG_PAX_KERNEXEC
12989 + call pax_enter_kernel
12990 +#endif
12991 +.endm
12992 +
12993 +.macro pax_exit_kernel
12994 +#ifdef CONFIG_PAX_KERNEXEC
12995 + call pax_exit_kernel
12996 +#endif
12997 +.endm
12998 +
12999 +#ifdef CONFIG_PAX_KERNEXEC
13000 +ENTRY(pax_enter_kernel)
13001 +#ifdef CONFIG_PARAVIRT
13002 + pushl %eax
13003 + pushl %ecx
13004 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
13005 + mov %eax, %esi
13006 +#else
13007 + mov %cr0, %esi
13008 +#endif
13009 + bts $16, %esi
13010 + jnc 1f
13011 + mov %cs, %esi
13012 + cmp $__KERNEL_CS, %esi
13013 + jz 3f
13014 + ljmp $__KERNEL_CS, $3f
13015 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13016 +2:
13017 +#ifdef CONFIG_PARAVIRT
13018 + mov %esi, %eax
13019 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13020 +#else
13021 + mov %esi, %cr0
13022 +#endif
13023 +3:
13024 +#ifdef CONFIG_PARAVIRT
13025 + popl %ecx
13026 + popl %eax
13027 +#endif
13028 + ret
13029 +ENDPROC(pax_enter_kernel)
13030 +
13031 +ENTRY(pax_exit_kernel)
13032 +#ifdef CONFIG_PARAVIRT
13033 + pushl %eax
13034 + pushl %ecx
13035 +#endif
13036 + mov %cs, %esi
13037 + cmp $__KERNEXEC_KERNEL_CS, %esi
13038 + jnz 2f
13039 +#ifdef CONFIG_PARAVIRT
13040 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13041 + mov %eax, %esi
13042 +#else
13043 + mov %cr0, %esi
13044 +#endif
13045 + btr $16, %esi
13046 + ljmp $__KERNEL_CS, $1f
13047 +1:
13048 +#ifdef CONFIG_PARAVIRT
13049 + mov %esi, %eax
13050 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13051 +#else
13052 + mov %esi, %cr0
13053 +#endif
13054 +2:
13055 +#ifdef CONFIG_PARAVIRT
13056 + popl %ecx
13057 + popl %eax
13058 +#endif
13059 + ret
13060 +ENDPROC(pax_exit_kernel)
13061 +#endif
13062 +
13063 +.macro pax_erase_kstack
13064 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13065 + call pax_erase_kstack
13066 +#endif
13067 +.endm
13068 +
13069 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13070 +/*
13071 + * ebp: thread_info
13072 + * ecx, edx: can be clobbered
13073 + */
13074 +ENTRY(pax_erase_kstack)
13075 + pushl %edi
13076 + pushl %eax
13077 +
13078 + mov TI_lowest_stack(%ebp), %edi
13079 + mov $-0xBEEF, %eax
13080 + std
13081 +
13082 +1: mov %edi, %ecx
13083 + and $THREAD_SIZE_asm - 1, %ecx
13084 + shr $2, %ecx
13085 + repne scasl
13086 + jecxz 2f
13087 +
13088 + cmp $2*16, %ecx
13089 + jc 2f
13090 +
13091 + mov $2*16, %ecx
13092 + repe scasl
13093 + jecxz 2f
13094 + jne 1b
13095 +
13096 +2: cld
13097 + mov %esp, %ecx
13098 + sub %edi, %ecx
13099 + shr $2, %ecx
13100 + rep stosl
13101 +
13102 + mov TI_task_thread_sp0(%ebp), %edi
13103 + sub $128, %edi
13104 + mov %edi, TI_lowest_stack(%ebp)
13105 +
13106 + popl %eax
13107 + popl %edi
13108 + ret
13109 +ENDPROC(pax_erase_kstack)
13110 +#endif
13111 +
13112 +.macro __SAVE_ALL _DS
13113 cld
13114 PUSH_GS
13115 pushl_cfi %fs
13116 @@ -215,7 +348,7 @@
13117 CFI_REL_OFFSET ecx, 0
13118 pushl_cfi %ebx
13119 CFI_REL_OFFSET ebx, 0
13120 - movl $(__USER_DS), %edx
13121 + movl $\_DS, %edx
13122 movl %edx, %ds
13123 movl %edx, %es
13124 movl $(__KERNEL_PERCPU), %edx
13125 @@ -223,6 +356,15 @@
13126 SET_KERNEL_GS %edx
13127 .endm
13128
13129 +.macro SAVE_ALL
13130 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13131 + __SAVE_ALL __KERNEL_DS
13132 + pax_enter_kernel
13133 +#else
13134 + __SAVE_ALL __USER_DS
13135 +#endif
13136 +.endm
13137 +
13138 .macro RESTORE_INT_REGS
13139 popl_cfi %ebx
13140 CFI_RESTORE ebx
13141 @@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
13142 popfl_cfi
13143 jmp syscall_exit
13144 CFI_ENDPROC
13145 -END(ret_from_fork)
13146 +ENDPROC(ret_from_fork)
13147
13148 /*
13149 * Interrupt exit functions should be protected against kprobes
13150 @@ -333,7 +475,15 @@ check_userspace:
13151 movb PT_CS(%esp), %al
13152 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13153 cmpl $USER_RPL, %eax
13154 +
13155 +#ifdef CONFIG_PAX_KERNEXEC
13156 + jae resume_userspace
13157 +
13158 + PAX_EXIT_KERNEL
13159 + jmp resume_kernel
13160 +#else
13161 jb resume_kernel # not returning to v8086 or userspace
13162 +#endif
13163
13164 ENTRY(resume_userspace)
13165 LOCKDEP_SYS_EXIT
13166 @@ -345,8 +495,8 @@ ENTRY(resume_userspace)
13167 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13168 # int/exception return?
13169 jne work_pending
13170 - jmp restore_all
13171 -END(ret_from_exception)
13172 + jmp restore_all_pax
13173 +ENDPROC(ret_from_exception)
13174
13175 #ifdef CONFIG_PREEMPT
13176 ENTRY(resume_kernel)
13177 @@ -361,7 +511,7 @@ need_resched:
13178 jz restore_all
13179 call preempt_schedule_irq
13180 jmp need_resched
13181 -END(resume_kernel)
13182 +ENDPROC(resume_kernel)
13183 #endif
13184 CFI_ENDPROC
13185 /*
13186 @@ -395,23 +545,34 @@ sysenter_past_esp:
13187 /*CFI_REL_OFFSET cs, 0*/
13188 /*
13189 * Push current_thread_info()->sysenter_return to the stack.
13190 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13191 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
13192 */
13193 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
13194 + pushl_cfi $0
13195 CFI_REL_OFFSET eip, 0
13196
13197 pushl_cfi %eax
13198 SAVE_ALL
13199 + GET_THREAD_INFO(%ebp)
13200 + movl TI_sysenter_return(%ebp),%ebp
13201 + movl %ebp,PT_EIP(%esp)
13202 ENABLE_INTERRUPTS(CLBR_NONE)
13203
13204 /*
13205 * Load the potential sixth argument from user stack.
13206 * Careful about security.
13207 */
13208 + movl PT_OLDESP(%esp),%ebp
13209 +
13210 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13211 + mov PT_OLDSS(%esp),%ds
13212 +1: movl %ds:(%ebp),%ebp
13213 + push %ss
13214 + pop %ds
13215 +#else
13216 cmpl $__PAGE_OFFSET-3,%ebp
13217 jae syscall_fault
13218 1: movl (%ebp),%ebp
13219 +#endif
13220 +
13221 movl %ebp,PT_EBP(%esp)
13222 .section __ex_table,"a"
13223 .align 4
13224 @@ -434,12 +595,24 @@ sysenter_do_call:
13225 testl $_TIF_ALLWORK_MASK, %ecx
13226 jne sysexit_audit
13227 sysenter_exit:
13228 +
13229 +#ifdef CONFIG_PAX_RANDKSTACK
13230 + pushl_cfi %eax
13231 + movl %esp, %eax
13232 + call pax_randomize_kstack
13233 + popl_cfi %eax
13234 +#endif
13235 +
13236 + pax_erase_kstack
13237 +
13238 /* if something modifies registers it must also disable sysexit */
13239 movl PT_EIP(%esp), %edx
13240 movl PT_OLDESP(%esp), %ecx
13241 xorl %ebp,%ebp
13242 TRACE_IRQS_ON
13243 1: mov PT_FS(%esp), %fs
13244 +2: mov PT_DS(%esp), %ds
13245 +3: mov PT_ES(%esp), %es
13246 PTGS_TO_GS
13247 ENABLE_INTERRUPTS_SYSEXIT
13248
13249 @@ -456,6 +629,9 @@ sysenter_audit:
13250 movl %eax,%edx /* 2nd arg: syscall number */
13251 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13252 call audit_syscall_entry
13253 +
13254 + pax_erase_kstack
13255 +
13256 pushl_cfi %ebx
13257 movl PT_EAX(%esp),%eax /* reload syscall number */
13258 jmp sysenter_do_call
13259 @@ -482,11 +658,17 @@ sysexit_audit:
13260
13261 CFI_ENDPROC
13262 .pushsection .fixup,"ax"
13263 -2: movl $0,PT_FS(%esp)
13264 +4: movl $0,PT_FS(%esp)
13265 + jmp 1b
13266 +5: movl $0,PT_DS(%esp)
13267 + jmp 1b
13268 +6: movl $0,PT_ES(%esp)
13269 jmp 1b
13270 .section __ex_table,"a"
13271 .align 4
13272 - .long 1b,2b
13273 + .long 1b,4b
13274 + .long 2b,5b
13275 + .long 3b,6b
13276 .popsection
13277 PTGS_TO_GS_EX
13278 ENDPROC(ia32_sysenter_target)
13279 @@ -519,6 +701,15 @@ syscall_exit:
13280 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13281 jne syscall_exit_work
13282
13283 +restore_all_pax:
13284 +
13285 +#ifdef CONFIG_PAX_RANDKSTACK
13286 + movl %esp, %eax
13287 + call pax_randomize_kstack
13288 +#endif
13289 +
13290 + pax_erase_kstack
13291 +
13292 restore_all:
13293 TRACE_IRQS_IRET
13294 restore_all_notrace:
13295 @@ -578,14 +769,34 @@ ldt_ss:
13296 * compensating for the offset by changing to the ESPFIX segment with
13297 * a base address that matches for the difference.
13298 */
13299 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
13300 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
13301 mov %esp, %edx /* load kernel esp */
13302 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13303 mov %dx, %ax /* eax: new kernel esp */
13304 sub %eax, %edx /* offset (low word is 0) */
13305 +#ifdef CONFIG_SMP
13306 + movl PER_CPU_VAR(cpu_number), %ebx
13307 + shll $PAGE_SHIFT_asm, %ebx
13308 + addl $cpu_gdt_table, %ebx
13309 +#else
13310 + movl $cpu_gdt_table, %ebx
13311 +#endif
13312 shr $16, %edx
13313 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
13314 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
13315 +
13316 +#ifdef CONFIG_PAX_KERNEXEC
13317 + mov %cr0, %esi
13318 + btr $16, %esi
13319 + mov %esi, %cr0
13320 +#endif
13321 +
13322 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13323 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13324 +
13325 +#ifdef CONFIG_PAX_KERNEXEC
13326 + bts $16, %esi
13327 + mov %esi, %cr0
13328 +#endif
13329 +
13330 pushl_cfi $__ESPFIX_SS
13331 pushl_cfi %eax /* new kernel esp */
13332 /* Disable interrupts, but do not irqtrace this section: we
13333 @@ -614,34 +825,28 @@ work_resched:
13334 movl TI_flags(%ebp), %ecx
13335 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13336 # than syscall tracing?
13337 - jz restore_all
13338 + jz restore_all_pax
13339 testb $_TIF_NEED_RESCHED, %cl
13340 jnz work_resched
13341
13342 work_notifysig: # deal with pending signals and
13343 # notify-resume requests
13344 + movl %esp, %eax
13345 #ifdef CONFIG_VM86
13346 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13347 - movl %esp, %eax
13348 - jne work_notifysig_v86 # returning to kernel-space or
13349 + jz 1f # returning to kernel-space or
13350 # vm86-space
13351 - xorl %edx, %edx
13352 - call do_notify_resume
13353 - jmp resume_userspace_sig
13354
13355 - ALIGN
13356 -work_notifysig_v86:
13357 pushl_cfi %ecx # save ti_flags for do_notify_resume
13358 call save_v86_state # %eax contains pt_regs pointer
13359 popl_cfi %ecx
13360 movl %eax, %esp
13361 -#else
13362 - movl %esp, %eax
13363 +1:
13364 #endif
13365 xorl %edx, %edx
13366 call do_notify_resume
13367 jmp resume_userspace_sig
13368 -END(work_pending)
13369 +ENDPROC(work_pending)
13370
13371 # perform syscall exit tracing
13372 ALIGN
13373 @@ -649,11 +854,14 @@ syscall_trace_entry:
13374 movl $-ENOSYS,PT_EAX(%esp)
13375 movl %esp, %eax
13376 call syscall_trace_enter
13377 +
13378 + pax_erase_kstack
13379 +
13380 /* What it returned is what we'll actually use. */
13381 cmpl $(nr_syscalls), %eax
13382 jnae syscall_call
13383 jmp syscall_exit
13384 -END(syscall_trace_entry)
13385 +ENDPROC(syscall_trace_entry)
13386
13387 # perform syscall exit tracing
13388 ALIGN
13389 @@ -666,20 +874,24 @@ syscall_exit_work:
13390 movl %esp, %eax
13391 call syscall_trace_leave
13392 jmp resume_userspace
13393 -END(syscall_exit_work)
13394 +ENDPROC(syscall_exit_work)
13395 CFI_ENDPROC
13396
13397 RING0_INT_FRAME # can't unwind into user space anyway
13398 syscall_fault:
13399 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13400 + push %ss
13401 + pop %ds
13402 +#endif
13403 GET_THREAD_INFO(%ebp)
13404 movl $-EFAULT,PT_EAX(%esp)
13405 jmp resume_userspace
13406 -END(syscall_fault)
13407 +ENDPROC(syscall_fault)
13408
13409 syscall_badsys:
13410 movl $-ENOSYS,PT_EAX(%esp)
13411 jmp resume_userspace
13412 -END(syscall_badsys)
13413 +ENDPROC(syscall_badsys)
13414 CFI_ENDPROC
13415 /*
13416 * End of kprobes section
13417 @@ -753,6 +965,36 @@ ptregs_clone:
13418 CFI_ENDPROC
13419 ENDPROC(ptregs_clone)
13420
13421 + ALIGN;
13422 +ENTRY(kernel_execve)
13423 + CFI_STARTPROC
13424 + pushl_cfi %ebp
13425 + sub $PT_OLDSS+4,%esp
13426 + pushl_cfi %edi
13427 + pushl_cfi %ecx
13428 + pushl_cfi %eax
13429 + lea 3*4(%esp),%edi
13430 + mov $PT_OLDSS/4+1,%ecx
13431 + xorl %eax,%eax
13432 + rep stosl
13433 + popl_cfi %eax
13434 + popl_cfi %ecx
13435 + popl_cfi %edi
13436 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13437 + pushl_cfi %esp
13438 + call sys_execve
13439 + add $4,%esp
13440 + CFI_ADJUST_CFA_OFFSET -4
13441 + GET_THREAD_INFO(%ebp)
13442 + test %eax,%eax
13443 + jz syscall_exit
13444 + add $PT_OLDSS+4,%esp
13445 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13446 + popl_cfi %ebp
13447 + ret
13448 + CFI_ENDPROC
13449 +ENDPROC(kernel_execve)
13450 +
13451 .macro FIXUP_ESPFIX_STACK
13452 /*
13453 * Switch back for ESPFIX stack to the normal zerobased stack
13454 @@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13455 * normal stack and adjusts ESP with the matching offset.
13456 */
13457 /* fixup the stack */
13458 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13459 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13460 +#ifdef CONFIG_SMP
13461 + movl PER_CPU_VAR(cpu_number), %ebx
13462 + shll $PAGE_SHIFT_asm, %ebx
13463 + addl $cpu_gdt_table, %ebx
13464 +#else
13465 + movl $cpu_gdt_table, %ebx
13466 +#endif
13467 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13468 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13469 shl $16, %eax
13470 addl %esp, %eax /* the adjusted stack pointer */
13471 pushl_cfi $__KERNEL_DS
13472 @@ -816,7 +1065,7 @@ vector=vector+1
13473 .endr
13474 2: jmp common_interrupt
13475 .endr
13476 -END(irq_entries_start)
13477 +ENDPROC(irq_entries_start)
13478
13479 .previous
13480 END(interrupt)
13481 @@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13482 pushl_cfi $do_coprocessor_error
13483 jmp error_code
13484 CFI_ENDPROC
13485 -END(coprocessor_error)
13486 +ENDPROC(coprocessor_error)
13487
13488 ENTRY(simd_coprocessor_error)
13489 RING0_INT_FRAME
13490 @@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13491 #endif
13492 jmp error_code
13493 CFI_ENDPROC
13494 -END(simd_coprocessor_error)
13495 +ENDPROC(simd_coprocessor_error)
13496
13497 ENTRY(device_not_available)
13498 RING0_INT_FRAME
13499 @@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13500 pushl_cfi $do_device_not_available
13501 jmp error_code
13502 CFI_ENDPROC
13503 -END(device_not_available)
13504 +ENDPROC(device_not_available)
13505
13506 #ifdef CONFIG_PARAVIRT
13507 ENTRY(native_iret)
13508 @@ -902,12 +1151,12 @@ ENTRY(native_iret)
13509 .align 4
13510 .long native_iret, iret_exc
13511 .previous
13512 -END(native_iret)
13513 +ENDPROC(native_iret)
13514
13515 ENTRY(native_irq_enable_sysexit)
13516 sti
13517 sysexit
13518 -END(native_irq_enable_sysexit)
13519 +ENDPROC(native_irq_enable_sysexit)
13520 #endif
13521
13522 ENTRY(overflow)
13523 @@ -916,7 +1165,7 @@ ENTRY(overflow)
13524 pushl_cfi $do_overflow
13525 jmp error_code
13526 CFI_ENDPROC
13527 -END(overflow)
13528 +ENDPROC(overflow)
13529
13530 ENTRY(bounds)
13531 RING0_INT_FRAME
13532 @@ -924,7 +1173,7 @@ ENTRY(bounds)
13533 pushl_cfi $do_bounds
13534 jmp error_code
13535 CFI_ENDPROC
13536 -END(bounds)
13537 +ENDPROC(bounds)
13538
13539 ENTRY(invalid_op)
13540 RING0_INT_FRAME
13541 @@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13542 pushl_cfi $do_invalid_op
13543 jmp error_code
13544 CFI_ENDPROC
13545 -END(invalid_op)
13546 +ENDPROC(invalid_op)
13547
13548 ENTRY(coprocessor_segment_overrun)
13549 RING0_INT_FRAME
13550 @@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13551 pushl_cfi $do_coprocessor_segment_overrun
13552 jmp error_code
13553 CFI_ENDPROC
13554 -END(coprocessor_segment_overrun)
13555 +ENDPROC(coprocessor_segment_overrun)
13556
13557 ENTRY(invalid_TSS)
13558 RING0_EC_FRAME
13559 pushl_cfi $do_invalid_TSS
13560 jmp error_code
13561 CFI_ENDPROC
13562 -END(invalid_TSS)
13563 +ENDPROC(invalid_TSS)
13564
13565 ENTRY(segment_not_present)
13566 RING0_EC_FRAME
13567 pushl_cfi $do_segment_not_present
13568 jmp error_code
13569 CFI_ENDPROC
13570 -END(segment_not_present)
13571 +ENDPROC(segment_not_present)
13572
13573 ENTRY(stack_segment)
13574 RING0_EC_FRAME
13575 pushl_cfi $do_stack_segment
13576 jmp error_code
13577 CFI_ENDPROC
13578 -END(stack_segment)
13579 +ENDPROC(stack_segment)
13580
13581 ENTRY(alignment_check)
13582 RING0_EC_FRAME
13583 pushl_cfi $do_alignment_check
13584 jmp error_code
13585 CFI_ENDPROC
13586 -END(alignment_check)
13587 +ENDPROC(alignment_check)
13588
13589 ENTRY(divide_error)
13590 RING0_INT_FRAME
13591 @@ -976,7 +1225,7 @@ ENTRY(divide_error)
13592 pushl_cfi $do_divide_error
13593 jmp error_code
13594 CFI_ENDPROC
13595 -END(divide_error)
13596 +ENDPROC(divide_error)
13597
13598 #ifdef CONFIG_X86_MCE
13599 ENTRY(machine_check)
13600 @@ -985,7 +1234,7 @@ ENTRY(machine_check)
13601 pushl_cfi machine_check_vector
13602 jmp error_code
13603 CFI_ENDPROC
13604 -END(machine_check)
13605 +ENDPROC(machine_check)
13606 #endif
13607
13608 ENTRY(spurious_interrupt_bug)
13609 @@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13610 pushl_cfi $do_spurious_interrupt_bug
13611 jmp error_code
13612 CFI_ENDPROC
13613 -END(spurious_interrupt_bug)
13614 +ENDPROC(spurious_interrupt_bug)
13615 /*
13616 * End of kprobes section
13617 */
13618 @@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13619
13620 ENTRY(mcount)
13621 ret
13622 -END(mcount)
13623 +ENDPROC(mcount)
13624
13625 ENTRY(ftrace_caller)
13626 cmpl $0, function_trace_stop
13627 @@ -1138,7 +1387,7 @@ ftrace_graph_call:
13628 .globl ftrace_stub
13629 ftrace_stub:
13630 ret
13631 -END(ftrace_caller)
13632 +ENDPROC(ftrace_caller)
13633
13634 #else /* ! CONFIG_DYNAMIC_FTRACE */
13635
13636 @@ -1174,7 +1423,7 @@ trace:
13637 popl %ecx
13638 popl %eax
13639 jmp ftrace_stub
13640 -END(mcount)
13641 +ENDPROC(mcount)
13642 #endif /* CONFIG_DYNAMIC_FTRACE */
13643 #endif /* CONFIG_FUNCTION_TRACER */
13644
13645 @@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13646 popl %ecx
13647 popl %eax
13648 ret
13649 -END(ftrace_graph_caller)
13650 +ENDPROC(ftrace_graph_caller)
13651
13652 .globl return_to_handler
13653 return_to_handler:
13654 @@ -1209,7 +1458,6 @@ return_to_handler:
13655 jmp *%ecx
13656 #endif
13657
13658 -.section .rodata,"a"
13659 #include "syscall_table_32.S"
13660
13661 syscall_table_size=(.-sys_call_table)
13662 @@ -1255,15 +1503,18 @@ error_code:
13663 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13664 REG_TO_PTGS %ecx
13665 SET_KERNEL_GS %ecx
13666 - movl $(__USER_DS), %ecx
13667 + movl $(__KERNEL_DS), %ecx
13668 movl %ecx, %ds
13669 movl %ecx, %es
13670 +
13671 + pax_enter_kernel
13672 +
13673 TRACE_IRQS_OFF
13674 movl %esp,%eax # pt_regs pointer
13675 call *%edi
13676 jmp ret_from_exception
13677 CFI_ENDPROC
13678 -END(page_fault)
13679 +ENDPROC(page_fault)
13680
13681 /*
13682 * Debug traps and NMI can happen at the one SYSENTER instruction
13683 @@ -1305,7 +1556,7 @@ debug_stack_correct:
13684 call do_debug
13685 jmp ret_from_exception
13686 CFI_ENDPROC
13687 -END(debug)
13688 +ENDPROC(debug)
13689
13690 /*
13691 * NMI is doubly nasty. It can happen _while_ we're handling
13692 @@ -1342,6 +1593,9 @@ nmi_stack_correct:
13693 xorl %edx,%edx # zero error code
13694 movl %esp,%eax # pt_regs pointer
13695 call do_nmi
13696 +
13697 + pax_exit_kernel
13698 +
13699 jmp restore_all_notrace
13700 CFI_ENDPROC
13701
13702 @@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13703 FIXUP_ESPFIX_STACK # %eax == %esp
13704 xorl %edx,%edx # zero error code
13705 call do_nmi
13706 +
13707 + pax_exit_kernel
13708 +
13709 RESTORE_REGS
13710 lss 12+4(%esp), %esp # back to espfix stack
13711 CFI_ADJUST_CFA_OFFSET -24
13712 jmp irq_return
13713 CFI_ENDPROC
13714 -END(nmi)
13715 +ENDPROC(nmi)
13716
13717 ENTRY(int3)
13718 RING0_INT_FRAME
13719 @@ -1395,14 +1652,14 @@ ENTRY(int3)
13720 call do_int3
13721 jmp ret_from_exception
13722 CFI_ENDPROC
13723 -END(int3)
13724 +ENDPROC(int3)
13725
13726 ENTRY(general_protection)
13727 RING0_EC_FRAME
13728 pushl_cfi $do_general_protection
13729 jmp error_code
13730 CFI_ENDPROC
13731 -END(general_protection)
13732 +ENDPROC(general_protection)
13733
13734 #ifdef CONFIG_KVM_GUEST
13735 ENTRY(async_page_fault)
13736 @@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13737 pushl_cfi $do_async_page_fault
13738 jmp error_code
13739 CFI_ENDPROC
13740 -END(async_page_fault)
13741 +ENDPROC(async_page_fault)
13742 #endif
13743
13744 /*
13745 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13746 index faf8d5e..4f16a68 100644
13747 --- a/arch/x86/kernel/entry_64.S
13748 +++ b/arch/x86/kernel/entry_64.S
13749 @@ -55,6 +55,8 @@
13750 #include <asm/paravirt.h>
13751 #include <asm/ftrace.h>
13752 #include <asm/percpu.h>
13753 +#include <asm/pgtable.h>
13754 +#include <asm/alternative-asm.h>
13755
13756 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13757 #include <linux/elf-em.h>
13758 @@ -68,8 +70,9 @@
13759 #ifdef CONFIG_FUNCTION_TRACER
13760 #ifdef CONFIG_DYNAMIC_FTRACE
13761 ENTRY(mcount)
13762 + pax_force_retaddr
13763 retq
13764 -END(mcount)
13765 +ENDPROC(mcount)
13766
13767 ENTRY(ftrace_caller)
13768 cmpl $0, function_trace_stop
13769 @@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13770 #endif
13771
13772 GLOBAL(ftrace_stub)
13773 + pax_force_retaddr
13774 retq
13775 -END(ftrace_caller)
13776 +ENDPROC(ftrace_caller)
13777
13778 #else /* ! CONFIG_DYNAMIC_FTRACE */
13779 ENTRY(mcount)
13780 @@ -112,6 +116,7 @@ ENTRY(mcount)
13781 #endif
13782
13783 GLOBAL(ftrace_stub)
13784 + pax_force_retaddr
13785 retq
13786
13787 trace:
13788 @@ -121,12 +126,13 @@ trace:
13789 movq 8(%rbp), %rsi
13790 subq $MCOUNT_INSN_SIZE, %rdi
13791
13792 + pax_force_fptr ftrace_trace_function
13793 call *ftrace_trace_function
13794
13795 MCOUNT_RESTORE_FRAME
13796
13797 jmp ftrace_stub
13798 -END(mcount)
13799 +ENDPROC(mcount)
13800 #endif /* CONFIG_DYNAMIC_FTRACE */
13801 #endif /* CONFIG_FUNCTION_TRACER */
13802
13803 @@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13804
13805 MCOUNT_RESTORE_FRAME
13806
13807 + pax_force_retaddr
13808 retq
13809 -END(ftrace_graph_caller)
13810 +ENDPROC(ftrace_graph_caller)
13811
13812 GLOBAL(return_to_handler)
13813 subq $24, %rsp
13814 @@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13815 movq 8(%rsp), %rdx
13816 movq (%rsp), %rax
13817 addq $24, %rsp
13818 + pax_force_fptr %rdi
13819 jmp *%rdi
13820 #endif
13821
13822 @@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13823 ENDPROC(native_usergs_sysret64)
13824 #endif /* CONFIG_PARAVIRT */
13825
13826 + .macro ljmpq sel, off
13827 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13828 + .byte 0x48; ljmp *1234f(%rip)
13829 + .pushsection .rodata
13830 + .align 16
13831 + 1234: .quad \off; .word \sel
13832 + .popsection
13833 +#else
13834 + pushq $\sel
13835 + pushq $\off
13836 + lretq
13837 +#endif
13838 + .endm
13839 +
13840 + .macro pax_enter_kernel
13841 + pax_set_fptr_mask
13842 +#ifdef CONFIG_PAX_KERNEXEC
13843 + call pax_enter_kernel
13844 +#endif
13845 + .endm
13846 +
13847 + .macro pax_exit_kernel
13848 +#ifdef CONFIG_PAX_KERNEXEC
13849 + call pax_exit_kernel
13850 +#endif
13851 + .endm
13852 +
13853 +#ifdef CONFIG_PAX_KERNEXEC
13854 +ENTRY(pax_enter_kernel)
13855 + pushq %rdi
13856 +
13857 +#ifdef CONFIG_PARAVIRT
13858 + PV_SAVE_REGS(CLBR_RDI)
13859 +#endif
13860 +
13861 + GET_CR0_INTO_RDI
13862 + bts $16,%rdi
13863 + jnc 3f
13864 + mov %cs,%edi
13865 + cmp $__KERNEL_CS,%edi
13866 + jnz 2f
13867 +1:
13868 +
13869 +#ifdef CONFIG_PARAVIRT
13870 + PV_RESTORE_REGS(CLBR_RDI)
13871 +#endif
13872 +
13873 + popq %rdi
13874 + pax_force_retaddr
13875 + retq
13876 +
13877 +2: ljmpq __KERNEL_CS,1f
13878 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
13879 +4: SET_RDI_INTO_CR0
13880 + jmp 1b
13881 +ENDPROC(pax_enter_kernel)
13882 +
13883 +ENTRY(pax_exit_kernel)
13884 + pushq %rdi
13885 +
13886 +#ifdef CONFIG_PARAVIRT
13887 + PV_SAVE_REGS(CLBR_RDI)
13888 +#endif
13889 +
13890 + mov %cs,%rdi
13891 + cmp $__KERNEXEC_KERNEL_CS,%edi
13892 + jz 2f
13893 +1:
13894 +
13895 +#ifdef CONFIG_PARAVIRT
13896 + PV_RESTORE_REGS(CLBR_RDI);
13897 +#endif
13898 +
13899 + popq %rdi
13900 + pax_force_retaddr
13901 + retq
13902 +
13903 +2: GET_CR0_INTO_RDI
13904 + btr $16,%rdi
13905 + ljmpq __KERNEL_CS,3f
13906 +3: SET_RDI_INTO_CR0
13907 + jmp 1b
13908 +#ifdef CONFIG_PARAVIRT
13909 + PV_RESTORE_REGS(CLBR_RDI);
13910 +#endif
13911 +
13912 + popq %rdi
13913 + pax_force_retaddr
13914 + retq
13915 +ENDPROC(pax_exit_kernel)
13916 +#endif
13917 +
13918 + .macro pax_enter_kernel_user
13919 + pax_set_fptr_mask
13920 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13921 + call pax_enter_kernel_user
13922 +#endif
13923 + .endm
13924 +
13925 + .macro pax_exit_kernel_user
13926 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13927 + call pax_exit_kernel_user
13928 +#endif
13929 +#ifdef CONFIG_PAX_RANDKSTACK
13930 + pushq %rax
13931 + call pax_randomize_kstack
13932 + popq %rax
13933 +#endif
13934 + .endm
13935 +
13936 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13937 +ENTRY(pax_enter_kernel_user)
13938 + pushq %rdi
13939 + pushq %rbx
13940 +
13941 +#ifdef CONFIG_PARAVIRT
13942 + PV_SAVE_REGS(CLBR_RDI)
13943 +#endif
13944 +
13945 + GET_CR3_INTO_RDI
13946 + mov %rdi,%rbx
13947 + add $__START_KERNEL_map,%rbx
13948 + sub phys_base(%rip),%rbx
13949 +
13950 +#ifdef CONFIG_PARAVIRT
13951 + pushq %rdi
13952 + cmpl $0, pv_info+PARAVIRT_enabled
13953 + jz 1f
13954 + i = 0
13955 + .rept USER_PGD_PTRS
13956 + mov i*8(%rbx),%rsi
13957 + mov $0,%sil
13958 + lea i*8(%rbx),%rdi
13959 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13960 + i = i + 1
13961 + .endr
13962 + jmp 2f
13963 +1:
13964 +#endif
13965 +
13966 + i = 0
13967 + .rept USER_PGD_PTRS
13968 + movb $0,i*8(%rbx)
13969 + i = i + 1
13970 + .endr
13971 +
13972 +#ifdef CONFIG_PARAVIRT
13973 +2: popq %rdi
13974 +#endif
13975 + SET_RDI_INTO_CR3
13976 +
13977 +#ifdef CONFIG_PAX_KERNEXEC
13978 + GET_CR0_INTO_RDI
13979 + bts $16,%rdi
13980 + SET_RDI_INTO_CR0
13981 +#endif
13982 +
13983 +#ifdef CONFIG_PARAVIRT
13984 + PV_RESTORE_REGS(CLBR_RDI)
13985 +#endif
13986 +
13987 + popq %rbx
13988 + popq %rdi
13989 + pax_force_retaddr
13990 + retq
13991 +ENDPROC(pax_enter_kernel_user)
13992 +
13993 +ENTRY(pax_exit_kernel_user)
13994 + push %rdi
13995 +
13996 +#ifdef CONFIG_PARAVIRT
13997 + pushq %rbx
13998 + PV_SAVE_REGS(CLBR_RDI)
13999 +#endif
14000 +
14001 +#ifdef CONFIG_PAX_KERNEXEC
14002 + GET_CR0_INTO_RDI
14003 + btr $16,%rdi
14004 + SET_RDI_INTO_CR0
14005 +#endif
14006 +
14007 + GET_CR3_INTO_RDI
14008 + add $__START_KERNEL_map,%rdi
14009 + sub phys_base(%rip),%rdi
14010 +
14011 +#ifdef CONFIG_PARAVIRT
14012 + cmpl $0, pv_info+PARAVIRT_enabled
14013 + jz 1f
14014 + mov %rdi,%rbx
14015 + i = 0
14016 + .rept USER_PGD_PTRS
14017 + mov i*8(%rbx),%rsi
14018 + mov $0x67,%sil
14019 + lea i*8(%rbx),%rdi
14020 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
14021 + i = i + 1
14022 + .endr
14023 + jmp 2f
14024 +1:
14025 +#endif
14026 +
14027 + i = 0
14028 + .rept USER_PGD_PTRS
14029 + movb $0x67,i*8(%rdi)
14030 + i = i + 1
14031 + .endr
14032 +
14033 +#ifdef CONFIG_PARAVIRT
14034 +2: PV_RESTORE_REGS(CLBR_RDI)
14035 + popq %rbx
14036 +#endif
14037 +
14038 + popq %rdi
14039 + pax_force_retaddr
14040 + retq
14041 +ENDPROC(pax_exit_kernel_user)
14042 +#endif
14043 +
14044 +.macro pax_erase_kstack
14045 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14046 + call pax_erase_kstack
14047 +#endif
14048 +.endm
14049 +
14050 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14051 +/*
14052 + * r11: thread_info
14053 + * rcx, rdx: can be clobbered
14054 + */
14055 +ENTRY(pax_erase_kstack)
14056 + pushq %rdi
14057 + pushq %rax
14058 + pushq %r11
14059 +
14060 + GET_THREAD_INFO(%r11)
14061 + mov TI_lowest_stack(%r11), %rdi
14062 + mov $-0xBEEF, %rax
14063 + std
14064 +
14065 +1: mov %edi, %ecx
14066 + and $THREAD_SIZE_asm - 1, %ecx
14067 + shr $3, %ecx
14068 + repne scasq
14069 + jecxz 2f
14070 +
14071 + cmp $2*8, %ecx
14072 + jc 2f
14073 +
14074 + mov $2*8, %ecx
14075 + repe scasq
14076 + jecxz 2f
14077 + jne 1b
14078 +
14079 +2: cld
14080 + mov %esp, %ecx
14081 + sub %edi, %ecx
14082 +
14083 + cmp $THREAD_SIZE_asm, %rcx
14084 + jb 3f
14085 + ud2
14086 +3:
14087 +
14088 + shr $3, %ecx
14089 + rep stosq
14090 +
14091 + mov TI_task_thread_sp0(%r11), %rdi
14092 + sub $256, %rdi
14093 + mov %rdi, TI_lowest_stack(%r11)
14094 +
14095 + popq %r11
14096 + popq %rax
14097 + popq %rdi
14098 + pax_force_retaddr
14099 + ret
14100 +ENDPROC(pax_erase_kstack)
14101 +#endif
14102
14103 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
14104 #ifdef CONFIG_TRACE_IRQFLAGS
14105 @@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
14106 .endm
14107
14108 .macro UNFAKE_STACK_FRAME
14109 - addq $8*6, %rsp
14110 - CFI_ADJUST_CFA_OFFSET -(6*8)
14111 + addq $8*6 + ARG_SKIP, %rsp
14112 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
14113 .endm
14114
14115 /*
14116 @@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
14117 movq %rsp, %rsi
14118
14119 leaq -RBP(%rsp),%rdi /* arg1 for handler */
14120 - testl $3, CS(%rdi)
14121 + testb $3, CS(%rdi)
14122 je 1f
14123 SWAPGS
14124 /*
14125 @@ -355,9 +639,10 @@ ENTRY(save_rest)
14126 movq_cfi r15, R15+16
14127 movq %r11, 8(%rsp) /* return address */
14128 FIXUP_TOP_OF_STACK %r11, 16
14129 + pax_force_retaddr
14130 ret
14131 CFI_ENDPROC
14132 -END(save_rest)
14133 +ENDPROC(save_rest)
14134
14135 /* save complete stack frame */
14136 .pushsection .kprobes.text, "ax"
14137 @@ -386,9 +671,10 @@ ENTRY(save_paranoid)
14138 js 1f /* negative -> in kernel */
14139 SWAPGS
14140 xorl %ebx,%ebx
14141 -1: ret
14142 +1: pax_force_retaddr_bts
14143 + ret
14144 CFI_ENDPROC
14145 -END(save_paranoid)
14146 +ENDPROC(save_paranoid)
14147 .popsection
14148
14149 /*
14150 @@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
14151
14152 RESTORE_REST
14153
14154 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14155 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14156 je int_ret_from_sys_call
14157
14158 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
14159 @@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
14160 jmp ret_from_sys_call # go to the SYSRET fastpath
14161
14162 CFI_ENDPROC
14163 -END(ret_from_fork)
14164 +ENDPROC(ret_from_fork)
14165
14166 /*
14167 * System call entry. Up to 6 arguments in registers are supported.
14168 @@ -456,7 +742,7 @@ END(ret_from_fork)
14169 ENTRY(system_call)
14170 CFI_STARTPROC simple
14171 CFI_SIGNAL_FRAME
14172 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14173 + CFI_DEF_CFA rsp,0
14174 CFI_REGISTER rip,rcx
14175 /*CFI_REGISTER rflags,r11*/
14176 SWAPGS_UNSAFE_STACK
14177 @@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
14178
14179 movq %rsp,PER_CPU_VAR(old_rsp)
14180 movq PER_CPU_VAR(kernel_stack),%rsp
14181 + SAVE_ARGS 8*6,0
14182 + pax_enter_kernel_user
14183 /*
14184 * No need to follow this irqs off/on section - it's straight
14185 * and short:
14186 */
14187 ENABLE_INTERRUPTS(CLBR_NONE)
14188 - SAVE_ARGS 8,0
14189 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14190 movq %rcx,RIP-ARGOFFSET(%rsp)
14191 CFI_REL_OFFSET rip,RIP-ARGOFFSET
14192 @@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
14193 system_call_fastpath:
14194 cmpq $__NR_syscall_max,%rax
14195 ja badsys
14196 - movq %r10,%rcx
14197 + movq R10-ARGOFFSET(%rsp),%rcx
14198 call *sys_call_table(,%rax,8) # XXX: rip relative
14199 movq %rax,RAX-ARGOFFSET(%rsp)
14200 /*
14201 @@ -503,6 +790,8 @@ sysret_check:
14202 andl %edi,%edx
14203 jnz sysret_careful
14204 CFI_REMEMBER_STATE
14205 + pax_exit_kernel_user
14206 + pax_erase_kstack
14207 /*
14208 * sysretq will re-enable interrupts:
14209 */
14210 @@ -554,14 +843,18 @@ badsys:
14211 * jump back to the normal fast path.
14212 */
14213 auditsys:
14214 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
14215 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
14216 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
14217 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
14218 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
14219 movq %rax,%rsi /* 2nd arg: syscall number */
14220 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
14221 call audit_syscall_entry
14222 +
14223 + pax_erase_kstack
14224 +
14225 LOAD_ARGS 0 /* reload call-clobbered registers */
14226 + pax_set_fptr_mask
14227 jmp system_call_fastpath
14228
14229 /*
14230 @@ -591,16 +884,20 @@ tracesys:
14231 FIXUP_TOP_OF_STACK %rdi
14232 movq %rsp,%rdi
14233 call syscall_trace_enter
14234 +
14235 + pax_erase_kstack
14236 +
14237 /*
14238 * Reload arg registers from stack in case ptrace changed them.
14239 * We don't reload %rax because syscall_trace_enter() returned
14240 * the value it wants us to use in the table lookup.
14241 */
14242 LOAD_ARGS ARGOFFSET, 1
14243 + pax_set_fptr_mask
14244 RESTORE_REST
14245 cmpq $__NR_syscall_max,%rax
14246 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
14247 - movq %r10,%rcx /* fixup for C */
14248 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
14249 call *sys_call_table(,%rax,8)
14250 movq %rax,RAX-ARGOFFSET(%rsp)
14251 /* Use IRET because user could have changed frame */
14252 @@ -612,7 +909,7 @@ tracesys:
14253 GLOBAL(int_ret_from_sys_call)
14254 DISABLE_INTERRUPTS(CLBR_NONE)
14255 TRACE_IRQS_OFF
14256 - testl $3,CS-ARGOFFSET(%rsp)
14257 + testb $3,CS-ARGOFFSET(%rsp)
14258 je retint_restore_args
14259 movl $_TIF_ALLWORK_MASK,%edi
14260 /* edi: mask to check */
14261 @@ -623,6 +920,7 @@ GLOBAL(int_with_check)
14262 andl %edi,%edx
14263 jnz int_careful
14264 andl $~TS_COMPAT,TI_status(%rcx)
14265 + pax_erase_kstack
14266 jmp retint_swapgs
14267
14268 /* Either reschedule or signal or syscall exit tracking needed. */
14269 @@ -669,7 +967,7 @@ int_restore_rest:
14270 TRACE_IRQS_OFF
14271 jmp int_with_check
14272 CFI_ENDPROC
14273 -END(system_call)
14274 +ENDPROC(system_call)
14275
14276 /*
14277 * Certain special system calls that need to save a complete full stack frame.
14278 @@ -685,7 +983,7 @@ ENTRY(\label)
14279 call \func
14280 jmp ptregscall_common
14281 CFI_ENDPROC
14282 -END(\label)
14283 +ENDPROC(\label)
14284 .endm
14285
14286 PTREGSCALL stub_clone, sys_clone, %r8
14287 @@ -703,9 +1001,10 @@ ENTRY(ptregscall_common)
14288 movq_cfi_restore R12+8, r12
14289 movq_cfi_restore RBP+8, rbp
14290 movq_cfi_restore RBX+8, rbx
14291 + pax_force_retaddr
14292 ret $REST_SKIP /* pop extended registers */
14293 CFI_ENDPROC
14294 -END(ptregscall_common)
14295 +ENDPROC(ptregscall_common)
14296
14297 ENTRY(stub_execve)
14298 CFI_STARTPROC
14299 @@ -720,7 +1019,7 @@ ENTRY(stub_execve)
14300 RESTORE_REST
14301 jmp int_ret_from_sys_call
14302 CFI_ENDPROC
14303 -END(stub_execve)
14304 +ENDPROC(stub_execve)
14305
14306 /*
14307 * sigreturn is special because it needs to restore all registers on return.
14308 @@ -738,7 +1037,7 @@ ENTRY(stub_rt_sigreturn)
14309 RESTORE_REST
14310 jmp int_ret_from_sys_call
14311 CFI_ENDPROC
14312 -END(stub_rt_sigreturn)
14313 +ENDPROC(stub_rt_sigreturn)
14314
14315 /*
14316 * Build the entry stubs and pointer table with some assembler magic.
14317 @@ -773,7 +1072,7 @@ vector=vector+1
14318 2: jmp common_interrupt
14319 .endr
14320 CFI_ENDPROC
14321 -END(irq_entries_start)
14322 +ENDPROC(irq_entries_start)
14323
14324 .previous
14325 END(interrupt)
14326 @@ -793,6 +1092,16 @@ END(interrupt)
14327 subq $ORIG_RAX-RBP, %rsp
14328 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
14329 SAVE_ARGS_IRQ
14330 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14331 + testb $3, CS(%rdi)
14332 + jnz 1f
14333 + pax_enter_kernel
14334 + jmp 2f
14335 +1: pax_enter_kernel_user
14336 +2:
14337 +#else
14338 + pax_enter_kernel
14339 +#endif
14340 call \func
14341 .endm
14342
14343 @@ -824,7 +1133,7 @@ ret_from_intr:
14344
14345 exit_intr:
14346 GET_THREAD_INFO(%rcx)
14347 - testl $3,CS-ARGOFFSET(%rsp)
14348 + testb $3,CS-ARGOFFSET(%rsp)
14349 je retint_kernel
14350
14351 /* Interrupt came from user space */
14352 @@ -846,12 +1155,15 @@ retint_swapgs: /* return to user-space */
14353 * The iretq could re-enable interrupts:
14354 */
14355 DISABLE_INTERRUPTS(CLBR_ANY)
14356 + pax_exit_kernel_user
14357 TRACE_IRQS_IRETQ
14358 SWAPGS
14359 jmp restore_args
14360
14361 retint_restore_args: /* return to kernel space */
14362 DISABLE_INTERRUPTS(CLBR_ANY)
14363 + pax_exit_kernel
14364 + pax_force_retaddr RIP-ARGOFFSET
14365 /*
14366 * The iretq could re-enable interrupts:
14367 */
14368 @@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
14369 #endif
14370
14371 CFI_ENDPROC
14372 -END(common_interrupt)
14373 +ENDPROC(common_interrupt)
14374 /*
14375 * End of kprobes section
14376 */
14377 @@ -956,7 +1268,7 @@ ENTRY(\sym)
14378 interrupt \do_sym
14379 jmp ret_from_intr
14380 CFI_ENDPROC
14381 -END(\sym)
14382 +ENDPROC(\sym)
14383 .endm
14384
14385 #ifdef CONFIG_SMP
14386 @@ -1021,12 +1333,22 @@ ENTRY(\sym)
14387 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14388 call error_entry
14389 DEFAULT_FRAME 0
14390 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14391 + testb $3, CS(%rsp)
14392 + jnz 1f
14393 + pax_enter_kernel
14394 + jmp 2f
14395 +1: pax_enter_kernel_user
14396 +2:
14397 +#else
14398 + pax_enter_kernel
14399 +#endif
14400 movq %rsp,%rdi /* pt_regs pointer */
14401 xorl %esi,%esi /* no error code */
14402 call \do_sym
14403 jmp error_exit /* %ebx: no swapgs flag */
14404 CFI_ENDPROC
14405 -END(\sym)
14406 +ENDPROC(\sym)
14407 .endm
14408
14409 .macro paranoidzeroentry sym do_sym
14410 @@ -1038,15 +1360,25 @@ ENTRY(\sym)
14411 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14412 call save_paranoid
14413 TRACE_IRQS_OFF
14414 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14415 + testb $3, CS(%rsp)
14416 + jnz 1f
14417 + pax_enter_kernel
14418 + jmp 2f
14419 +1: pax_enter_kernel_user
14420 +2:
14421 +#else
14422 + pax_enter_kernel
14423 +#endif
14424 movq %rsp,%rdi /* pt_regs pointer */
14425 xorl %esi,%esi /* no error code */
14426 call \do_sym
14427 jmp paranoid_exit /* %ebx: no swapgs flag */
14428 CFI_ENDPROC
14429 -END(\sym)
14430 +ENDPROC(\sym)
14431 .endm
14432
14433 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14434 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14435 .macro paranoidzeroentry_ist sym do_sym ist
14436 ENTRY(\sym)
14437 INTR_FRAME
14438 @@ -1056,14 +1388,30 @@ ENTRY(\sym)
14439 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14440 call save_paranoid
14441 TRACE_IRQS_OFF
14442 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14443 + testb $3, CS(%rsp)
14444 + jnz 1f
14445 + pax_enter_kernel
14446 + jmp 2f
14447 +1: pax_enter_kernel_user
14448 +2:
14449 +#else
14450 + pax_enter_kernel
14451 +#endif
14452 movq %rsp,%rdi /* pt_regs pointer */
14453 xorl %esi,%esi /* no error code */
14454 +#ifdef CONFIG_SMP
14455 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14456 + lea init_tss(%r12), %r12
14457 +#else
14458 + lea init_tss(%rip), %r12
14459 +#endif
14460 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14461 call \do_sym
14462 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14463 jmp paranoid_exit /* %ebx: no swapgs flag */
14464 CFI_ENDPROC
14465 -END(\sym)
14466 +ENDPROC(\sym)
14467 .endm
14468
14469 .macro errorentry sym do_sym
14470 @@ -1074,13 +1422,23 @@ ENTRY(\sym)
14471 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14472 call error_entry
14473 DEFAULT_FRAME 0
14474 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14475 + testb $3, CS(%rsp)
14476 + jnz 1f
14477 + pax_enter_kernel
14478 + jmp 2f
14479 +1: pax_enter_kernel_user
14480 +2:
14481 +#else
14482 + pax_enter_kernel
14483 +#endif
14484 movq %rsp,%rdi /* pt_regs pointer */
14485 movq ORIG_RAX(%rsp),%rsi /* get error code */
14486 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14487 call \do_sym
14488 jmp error_exit /* %ebx: no swapgs flag */
14489 CFI_ENDPROC
14490 -END(\sym)
14491 +ENDPROC(\sym)
14492 .endm
14493
14494 /* error code is on the stack already */
14495 @@ -1093,13 +1451,23 @@ ENTRY(\sym)
14496 call save_paranoid
14497 DEFAULT_FRAME 0
14498 TRACE_IRQS_OFF
14499 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14500 + testb $3, CS(%rsp)
14501 + jnz 1f
14502 + pax_enter_kernel
14503 + jmp 2f
14504 +1: pax_enter_kernel_user
14505 +2:
14506 +#else
14507 + pax_enter_kernel
14508 +#endif
14509 movq %rsp,%rdi /* pt_regs pointer */
14510 movq ORIG_RAX(%rsp),%rsi /* get error code */
14511 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14512 call \do_sym
14513 jmp paranoid_exit /* %ebx: no swapgs flag */
14514 CFI_ENDPROC
14515 -END(\sym)
14516 +ENDPROC(\sym)
14517 .endm
14518
14519 zeroentry divide_error do_divide_error
14520 @@ -1129,9 +1497,10 @@ gs_change:
14521 2: mfence /* workaround */
14522 SWAPGS
14523 popfq_cfi
14524 + pax_force_retaddr
14525 ret
14526 CFI_ENDPROC
14527 -END(native_load_gs_index)
14528 +ENDPROC(native_load_gs_index)
14529
14530 .section __ex_table,"a"
14531 .align 8
14532 @@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
14533 * Here we are in the child and the registers are set as they were
14534 * at kernel_thread() invocation in the parent.
14535 */
14536 + pax_force_fptr %rsi
14537 call *%rsi
14538 # exit
14539 mov %eax, %edi
14540 call do_exit
14541 ud2 # padding for call trace
14542 CFI_ENDPROC
14543 -END(kernel_thread_helper)
14544 +ENDPROC(kernel_thread_helper)
14545
14546 /*
14547 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14548 @@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
14549 RESTORE_REST
14550 testq %rax,%rax
14551 je int_ret_from_sys_call
14552 - RESTORE_ARGS
14553 UNFAKE_STACK_FRAME
14554 + pax_force_retaddr
14555 ret
14556 CFI_ENDPROC
14557 -END(kernel_execve)
14558 +ENDPROC(kernel_execve)
14559
14560 /* Call softirq on interrupt stack. Interrupts are off. */
14561 ENTRY(call_softirq)
14562 @@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
14563 CFI_DEF_CFA_REGISTER rsp
14564 CFI_ADJUST_CFA_OFFSET -8
14565 decl PER_CPU_VAR(irq_count)
14566 + pax_force_retaddr
14567 ret
14568 CFI_ENDPROC
14569 -END(call_softirq)
14570 +ENDPROC(call_softirq)
14571
14572 #ifdef CONFIG_XEN
14573 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14574 @@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14575 decl PER_CPU_VAR(irq_count)
14576 jmp error_exit
14577 CFI_ENDPROC
14578 -END(xen_do_hypervisor_callback)
14579 +ENDPROC(xen_do_hypervisor_callback)
14580
14581 /*
14582 * Hypervisor uses this for application faults while it executes.
14583 @@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
14584 SAVE_ALL
14585 jmp error_exit
14586 CFI_ENDPROC
14587 -END(xen_failsafe_callback)
14588 +ENDPROC(xen_failsafe_callback)
14589
14590 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14591 xen_hvm_callback_vector xen_evtchn_do_upcall
14592 @@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
14593 TRACE_IRQS_OFF
14594 testl %ebx,%ebx /* swapgs needed? */
14595 jnz paranoid_restore
14596 - testl $3,CS(%rsp)
14597 + testb $3,CS(%rsp)
14598 jnz paranoid_userspace
14599 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14600 + pax_exit_kernel
14601 + TRACE_IRQS_IRETQ 0
14602 + SWAPGS_UNSAFE_STACK
14603 + RESTORE_ALL 8
14604 + pax_force_retaddr_bts
14605 + jmp irq_return
14606 +#endif
14607 paranoid_swapgs:
14608 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14609 + pax_exit_kernel_user
14610 +#else
14611 + pax_exit_kernel
14612 +#endif
14613 TRACE_IRQS_IRETQ 0
14614 SWAPGS_UNSAFE_STACK
14615 RESTORE_ALL 8
14616 jmp irq_return
14617 paranoid_restore:
14618 + pax_exit_kernel
14619 TRACE_IRQS_IRETQ 0
14620 RESTORE_ALL 8
14621 + pax_force_retaddr_bts
14622 jmp irq_return
14623 paranoid_userspace:
14624 GET_THREAD_INFO(%rcx)
14625 @@ -1394,7 +1780,7 @@ paranoid_schedule:
14626 TRACE_IRQS_OFF
14627 jmp paranoid_userspace
14628 CFI_ENDPROC
14629 -END(paranoid_exit)
14630 +ENDPROC(paranoid_exit)
14631
14632 /*
14633 * Exception entry point. This expects an error code/orig_rax on the stack.
14634 @@ -1421,12 +1807,13 @@ ENTRY(error_entry)
14635 movq_cfi r14, R14+8
14636 movq_cfi r15, R15+8
14637 xorl %ebx,%ebx
14638 - testl $3,CS+8(%rsp)
14639 + testb $3,CS+8(%rsp)
14640 je error_kernelspace
14641 error_swapgs:
14642 SWAPGS
14643 error_sti:
14644 TRACE_IRQS_OFF
14645 + pax_force_retaddr_bts
14646 ret
14647
14648 /*
14649 @@ -1453,7 +1840,7 @@ bstep_iret:
14650 movq %rcx,RIP+8(%rsp)
14651 jmp error_swapgs
14652 CFI_ENDPROC
14653 -END(error_entry)
14654 +ENDPROC(error_entry)
14655
14656
14657 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14658 @@ -1473,7 +1860,7 @@ ENTRY(error_exit)
14659 jnz retint_careful
14660 jmp retint_swapgs
14661 CFI_ENDPROC
14662 -END(error_exit)
14663 +ENDPROC(error_exit)
14664
14665
14666 /* runs on exception stack */
14667 @@ -1485,6 +1872,16 @@ ENTRY(nmi)
14668 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14669 call save_paranoid
14670 DEFAULT_FRAME 0
14671 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14672 + testb $3, CS(%rsp)
14673 + jnz 1f
14674 + pax_enter_kernel
14675 + jmp 2f
14676 +1: pax_enter_kernel_user
14677 +2:
14678 +#else
14679 + pax_enter_kernel
14680 +#endif
14681 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14682 movq %rsp,%rdi
14683 movq $-1,%rsi
14684 @@ -1495,12 +1892,28 @@ ENTRY(nmi)
14685 DISABLE_INTERRUPTS(CLBR_NONE)
14686 testl %ebx,%ebx /* swapgs needed? */
14687 jnz nmi_restore
14688 - testl $3,CS(%rsp)
14689 + testb $3,CS(%rsp)
14690 jnz nmi_userspace
14691 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14692 + pax_exit_kernel
14693 + SWAPGS_UNSAFE_STACK
14694 + RESTORE_ALL 8
14695 + pax_force_retaddr_bts
14696 + jmp irq_return
14697 +#endif
14698 nmi_swapgs:
14699 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14700 + pax_exit_kernel_user
14701 +#else
14702 + pax_exit_kernel
14703 +#endif
14704 SWAPGS_UNSAFE_STACK
14705 + RESTORE_ALL 8
14706 + jmp irq_return
14707 nmi_restore:
14708 + pax_exit_kernel
14709 RESTORE_ALL 8
14710 + pax_force_retaddr_bts
14711 jmp irq_return
14712 nmi_userspace:
14713 GET_THREAD_INFO(%rcx)
14714 @@ -1529,14 +1942,14 @@ nmi_schedule:
14715 jmp paranoid_exit
14716 CFI_ENDPROC
14717 #endif
14718 -END(nmi)
14719 +ENDPROC(nmi)
14720
14721 ENTRY(ignore_sysret)
14722 CFI_STARTPROC
14723 mov $-ENOSYS,%eax
14724 sysret
14725 CFI_ENDPROC
14726 -END(ignore_sysret)
14727 +ENDPROC(ignore_sysret)
14728
14729 /*
14730 * End of kprobes section
14731 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14732 index c9a281f..ce2f317 100644
14733 --- a/arch/x86/kernel/ftrace.c
14734 +++ b/arch/x86/kernel/ftrace.c
14735 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14736 static const void *mod_code_newcode; /* holds the text to write to the IP */
14737
14738 static unsigned nmi_wait_count;
14739 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14740 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14741
14742 int ftrace_arch_read_dyn_info(char *buf, int size)
14743 {
14744 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14745
14746 r = snprintf(buf, size, "%u %u",
14747 nmi_wait_count,
14748 - atomic_read(&nmi_update_count));
14749 + atomic_read_unchecked(&nmi_update_count));
14750 return r;
14751 }
14752
14753 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14754
14755 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14756 smp_rmb();
14757 + pax_open_kernel();
14758 ftrace_mod_code();
14759 - atomic_inc(&nmi_update_count);
14760 + pax_close_kernel();
14761 + atomic_inc_unchecked(&nmi_update_count);
14762 }
14763 /* Must have previous changes seen before executions */
14764 smp_mb();
14765 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14766 {
14767 unsigned char replaced[MCOUNT_INSN_SIZE];
14768
14769 + ip = ktla_ktva(ip);
14770 +
14771 /*
14772 * Note: Due to modules and __init, code can
14773 * disappear and change, we need to protect against faulting
14774 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14775 unsigned char old[MCOUNT_INSN_SIZE], *new;
14776 int ret;
14777
14778 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14779 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14780 new = ftrace_call_replace(ip, (unsigned long)func);
14781 ret = ftrace_modify_code(ip, old, new);
14782
14783 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14784 {
14785 unsigned char code[MCOUNT_INSN_SIZE];
14786
14787 + ip = ktla_ktva(ip);
14788 +
14789 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14790 return -EFAULT;
14791
14792 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14793 index 3bb0850..55a56f4 100644
14794 --- a/arch/x86/kernel/head32.c
14795 +++ b/arch/x86/kernel/head32.c
14796 @@ -19,6 +19,7 @@
14797 #include <asm/io_apic.h>
14798 #include <asm/bios_ebda.h>
14799 #include <asm/tlbflush.h>
14800 +#include <asm/boot.h>
14801
14802 static void __init i386_default_early_setup(void)
14803 {
14804 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14805 {
14806 memblock_init();
14807
14808 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14809 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14810
14811 #ifdef CONFIG_BLK_DEV_INITRD
14812 /* Reserve INITRD */
14813 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14814 index ce0be7c..c41476e 100644
14815 --- a/arch/x86/kernel/head_32.S
14816 +++ b/arch/x86/kernel/head_32.S
14817 @@ -25,6 +25,12 @@
14818 /* Physical address */
14819 #define pa(X) ((X) - __PAGE_OFFSET)
14820
14821 +#ifdef CONFIG_PAX_KERNEXEC
14822 +#define ta(X) (X)
14823 +#else
14824 +#define ta(X) ((X) - __PAGE_OFFSET)
14825 +#endif
14826 +
14827 /*
14828 * References to members of the new_cpu_data structure.
14829 */
14830 @@ -54,11 +60,7 @@
14831 * and small than max_low_pfn, otherwise will waste some page table entries
14832 */
14833
14834 -#if PTRS_PER_PMD > 1
14835 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14836 -#else
14837 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14838 -#endif
14839 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14840
14841 /* Number of possible pages in the lowmem region */
14842 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14843 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14844 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14845
14846 /*
14847 + * Real beginning of normal "text" segment
14848 + */
14849 +ENTRY(stext)
14850 +ENTRY(_stext)
14851 +
14852 +/*
14853 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14854 * %esi points to the real-mode code as a 32-bit pointer.
14855 * CS and DS must be 4 GB flat segments, but we don't depend on
14856 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14857 * can.
14858 */
14859 __HEAD
14860 +
14861 +#ifdef CONFIG_PAX_KERNEXEC
14862 + jmp startup_32
14863 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14864 +.fill PAGE_SIZE-5,1,0xcc
14865 +#endif
14866 +
14867 ENTRY(startup_32)
14868 movl pa(stack_start),%ecx
14869
14870 @@ -105,6 +120,57 @@ ENTRY(startup_32)
14871 2:
14872 leal -__PAGE_OFFSET(%ecx),%esp
14873
14874 +#ifdef CONFIG_SMP
14875 + movl $pa(cpu_gdt_table),%edi
14876 + movl $__per_cpu_load,%eax
14877 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14878 + rorl $16,%eax
14879 + movb %al,__KERNEL_PERCPU + 4(%edi)
14880 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14881 + movl $__per_cpu_end - 1,%eax
14882 + subl $__per_cpu_start,%eax
14883 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14884 +#endif
14885 +
14886 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14887 + movl $NR_CPUS,%ecx
14888 + movl $pa(cpu_gdt_table),%edi
14889 +1:
14890 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14891 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14892 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14893 + addl $PAGE_SIZE_asm,%edi
14894 + loop 1b
14895 +#endif
14896 +
14897 +#ifdef CONFIG_PAX_KERNEXEC
14898 + movl $pa(boot_gdt),%edi
14899 + movl $__LOAD_PHYSICAL_ADDR,%eax
14900 + movw %ax,__BOOT_CS + 2(%edi)
14901 + rorl $16,%eax
14902 + movb %al,__BOOT_CS + 4(%edi)
14903 + movb %ah,__BOOT_CS + 7(%edi)
14904 + rorl $16,%eax
14905 +
14906 + ljmp $(__BOOT_CS),$1f
14907 +1:
14908 +
14909 + movl $NR_CPUS,%ecx
14910 + movl $pa(cpu_gdt_table),%edi
14911 + addl $__PAGE_OFFSET,%eax
14912 +1:
14913 + movw %ax,__KERNEL_CS + 2(%edi)
14914 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14915 + rorl $16,%eax
14916 + movb %al,__KERNEL_CS + 4(%edi)
14917 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14918 + movb %ah,__KERNEL_CS + 7(%edi)
14919 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14920 + rorl $16,%eax
14921 + addl $PAGE_SIZE_asm,%edi
14922 + loop 1b
14923 +#endif
14924 +
14925 /*
14926 * Clear BSS first so that there are no surprises...
14927 */
14928 @@ -195,8 +261,11 @@ ENTRY(startup_32)
14929 movl %eax, pa(max_pfn_mapped)
14930
14931 /* Do early initialization of the fixmap area */
14932 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14933 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14934 +#ifdef CONFIG_COMPAT_VDSO
14935 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14936 +#else
14937 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14938 +#endif
14939 #else /* Not PAE */
14940
14941 page_pde_offset = (__PAGE_OFFSET >> 20);
14942 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14943 movl %eax, pa(max_pfn_mapped)
14944
14945 /* Do early initialization of the fixmap area */
14946 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14947 - movl %eax,pa(initial_page_table+0xffc)
14948 +#ifdef CONFIG_COMPAT_VDSO
14949 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14950 +#else
14951 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14952 +#endif
14953 #endif
14954
14955 #ifdef CONFIG_PARAVIRT
14956 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14957 cmpl $num_subarch_entries, %eax
14958 jae bad_subarch
14959
14960 - movl pa(subarch_entries)(,%eax,4), %eax
14961 - subl $__PAGE_OFFSET, %eax
14962 - jmp *%eax
14963 + jmp *pa(subarch_entries)(,%eax,4)
14964
14965 bad_subarch:
14966 WEAK(lguest_entry)
14967 @@ -255,10 +325,10 @@ WEAK(xen_entry)
14968 __INITDATA
14969
14970 subarch_entries:
14971 - .long default_entry /* normal x86/PC */
14972 - .long lguest_entry /* lguest hypervisor */
14973 - .long xen_entry /* Xen hypervisor */
14974 - .long default_entry /* Moorestown MID */
14975 + .long ta(default_entry) /* normal x86/PC */
14976 + .long ta(lguest_entry) /* lguest hypervisor */
14977 + .long ta(xen_entry) /* Xen hypervisor */
14978 + .long ta(default_entry) /* Moorestown MID */
14979 num_subarch_entries = (. - subarch_entries) / 4
14980 .previous
14981 #else
14982 @@ -312,6 +382,7 @@ default_entry:
14983 orl %edx,%eax
14984 movl %eax,%cr4
14985
14986 +#ifdef CONFIG_X86_PAE
14987 testb $X86_CR4_PAE, %al # check if PAE is enabled
14988 jz 6f
14989
14990 @@ -340,6 +411,9 @@ default_entry:
14991 /* Make changes effective */
14992 wrmsr
14993
14994 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14995 +#endif
14996 +
14997 6:
14998
14999 /*
15000 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
15001 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
15002 movl %eax,%ss # after changing gdt.
15003
15004 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
15005 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
15006 movl %eax,%ds
15007 movl %eax,%es
15008
15009 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
15010 */
15011 cmpb $0,ready
15012 jne 1f
15013 - movl $gdt_page,%eax
15014 + movl $cpu_gdt_table,%eax
15015 movl $stack_canary,%ecx
15016 +#ifdef CONFIG_SMP
15017 + addl $__per_cpu_load,%ecx
15018 +#endif
15019 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
15020 shrl $16, %ecx
15021 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
15022 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
15023 1:
15024 -#endif
15025 movl $(__KERNEL_STACK_CANARY),%eax
15026 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15027 + movl $(__USER_DS),%eax
15028 +#else
15029 + xorl %eax,%eax
15030 +#endif
15031 movl %eax,%gs
15032
15033 xorl %eax,%eax # Clear LDT
15034 @@ -558,22 +639,22 @@ early_page_fault:
15035 jmp early_fault
15036
15037 early_fault:
15038 - cld
15039 #ifdef CONFIG_PRINTK
15040 + cmpl $1,%ss:early_recursion_flag
15041 + je hlt_loop
15042 + incl %ss:early_recursion_flag
15043 + cld
15044 pusha
15045 movl $(__KERNEL_DS),%eax
15046 movl %eax,%ds
15047 movl %eax,%es
15048 - cmpl $2,early_recursion_flag
15049 - je hlt_loop
15050 - incl early_recursion_flag
15051 movl %cr2,%eax
15052 pushl %eax
15053 pushl %edx /* trapno */
15054 pushl $fault_msg
15055 call printk
15056 +; call dump_stack
15057 #endif
15058 - call dump_stack
15059 hlt_loop:
15060 hlt
15061 jmp hlt_loop
15062 @@ -581,8 +662,11 @@ hlt_loop:
15063 /* This is the default interrupt "handler" :-) */
15064 ALIGN
15065 ignore_int:
15066 - cld
15067 #ifdef CONFIG_PRINTK
15068 + cmpl $2,%ss:early_recursion_flag
15069 + je hlt_loop
15070 + incl %ss:early_recursion_flag
15071 + cld
15072 pushl %eax
15073 pushl %ecx
15074 pushl %edx
15075 @@ -591,9 +675,6 @@ ignore_int:
15076 movl $(__KERNEL_DS),%eax
15077 movl %eax,%ds
15078 movl %eax,%es
15079 - cmpl $2,early_recursion_flag
15080 - je hlt_loop
15081 - incl early_recursion_flag
15082 pushl 16(%esp)
15083 pushl 24(%esp)
15084 pushl 32(%esp)
15085 @@ -622,29 +703,43 @@ ENTRY(initial_code)
15086 /*
15087 * BSS section
15088 */
15089 -__PAGE_ALIGNED_BSS
15090 - .align PAGE_SIZE
15091 #ifdef CONFIG_X86_PAE
15092 +.section .initial_pg_pmd,"a",@progbits
15093 initial_pg_pmd:
15094 .fill 1024*KPMDS,4,0
15095 #else
15096 +.section .initial_page_table,"a",@progbits
15097 ENTRY(initial_page_table)
15098 .fill 1024,4,0
15099 #endif
15100 +.section .initial_pg_fixmap,"a",@progbits
15101 initial_pg_fixmap:
15102 .fill 1024,4,0
15103 +.section .empty_zero_page,"a",@progbits
15104 ENTRY(empty_zero_page)
15105 .fill 4096,1,0
15106 +.section .swapper_pg_dir,"a",@progbits
15107 ENTRY(swapper_pg_dir)
15108 +#ifdef CONFIG_X86_PAE
15109 + .fill 4,8,0
15110 +#else
15111 .fill 1024,4,0
15112 +#endif
15113 +
15114 +/*
15115 + * The IDT has to be page-aligned to simplify the Pentium
15116 + * F0 0F bug workaround.. We have a special link segment
15117 + * for this.
15118 + */
15119 +.section .idt,"a",@progbits
15120 +ENTRY(idt_table)
15121 + .fill 256,8,0
15122
15123 /*
15124 * This starts the data section.
15125 */
15126 #ifdef CONFIG_X86_PAE
15127 -__PAGE_ALIGNED_DATA
15128 - /* Page-aligned for the benefit of paravirt? */
15129 - .align PAGE_SIZE
15130 +.section .initial_page_table,"a",@progbits
15131 ENTRY(initial_page_table)
15132 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
15133 # if KPMDS == 3
15134 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
15135 # error "Kernel PMDs should be 1, 2 or 3"
15136 # endif
15137 .align PAGE_SIZE /* needs to be page-sized too */
15138 +
15139 +#ifdef CONFIG_PAX_PER_CPU_PGD
15140 +ENTRY(cpu_pgd)
15141 + .rept NR_CPUS
15142 + .fill 4,8,0
15143 + .endr
15144 +#endif
15145 +
15146 #endif
15147
15148 .data
15149 .balign 4
15150 ENTRY(stack_start)
15151 - .long init_thread_union+THREAD_SIZE
15152 + .long init_thread_union+THREAD_SIZE-8
15153
15154 +ready: .byte 0
15155 +
15156 +.section .rodata,"a",@progbits
15157 early_recursion_flag:
15158 .long 0
15159
15160 -ready: .byte 0
15161 -
15162 int_msg:
15163 .asciz "Unknown interrupt or fault at: %p %p %p\n"
15164
15165 @@ -707,7 +811,7 @@ fault_msg:
15166 .word 0 # 32 bit align gdt_desc.address
15167 boot_gdt_descr:
15168 .word __BOOT_DS+7
15169 - .long boot_gdt - __PAGE_OFFSET
15170 + .long pa(boot_gdt)
15171
15172 .word 0 # 32-bit align idt_desc.address
15173 idt_descr:
15174 @@ -718,7 +822,7 @@ idt_descr:
15175 .word 0 # 32 bit align gdt_desc.address
15176 ENTRY(early_gdt_descr)
15177 .word GDT_ENTRIES*8-1
15178 - .long gdt_page /* Overwritten for secondary CPUs */
15179 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
15180
15181 /*
15182 * The boot_gdt must mirror the equivalent in setup.S and is
15183 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
15184 .align L1_CACHE_BYTES
15185 ENTRY(boot_gdt)
15186 .fill GDT_ENTRY_BOOT_CS,8,0
15187 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
15188 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
15189 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
15190 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
15191 +
15192 + .align PAGE_SIZE_asm
15193 +ENTRY(cpu_gdt_table)
15194 + .rept NR_CPUS
15195 + .quad 0x0000000000000000 /* NULL descriptor */
15196 + .quad 0x0000000000000000 /* 0x0b reserved */
15197 + .quad 0x0000000000000000 /* 0x13 reserved */
15198 + .quad 0x0000000000000000 /* 0x1b reserved */
15199 +
15200 +#ifdef CONFIG_PAX_KERNEXEC
15201 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
15202 +#else
15203 + .quad 0x0000000000000000 /* 0x20 unused */
15204 +#endif
15205 +
15206 + .quad 0x0000000000000000 /* 0x28 unused */
15207 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
15208 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
15209 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
15210 + .quad 0x0000000000000000 /* 0x4b reserved */
15211 + .quad 0x0000000000000000 /* 0x53 reserved */
15212 + .quad 0x0000000000000000 /* 0x5b reserved */
15213 +
15214 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
15215 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
15216 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
15217 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
15218 +
15219 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
15220 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
15221 +
15222 + /*
15223 + * Segments used for calling PnP BIOS have byte granularity.
15224 + * The code segments and data segments have fixed 64k limits,
15225 + * the transfer segment sizes are set at run time.
15226 + */
15227 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
15228 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
15229 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
15230 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
15231 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
15232 +
15233 + /*
15234 + * The APM segments have byte granularity and their bases
15235 + * are set at run time. All have 64k limits.
15236 + */
15237 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
15238 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
15239 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
15240 +
15241 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
15242 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
15243 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
15244 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
15245 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
15246 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
15247 +
15248 + /* Be sure this is zeroed to avoid false validations in Xen */
15249 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
15250 + .endr
15251 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
15252 index e11e394..9aebc5d 100644
15253 --- a/arch/x86/kernel/head_64.S
15254 +++ b/arch/x86/kernel/head_64.S
15255 @@ -19,6 +19,8 @@
15256 #include <asm/cache.h>
15257 #include <asm/processor-flags.h>
15258 #include <asm/percpu.h>
15259 +#include <asm/cpufeature.h>
15260 +#include <asm/alternative-asm.h>
15261
15262 #ifdef CONFIG_PARAVIRT
15263 #include <asm/asm-offsets.h>
15264 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
15265 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
15266 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
15267 L3_START_KERNEL = pud_index(__START_KERNEL_map)
15268 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
15269 +L3_VMALLOC_START = pud_index(VMALLOC_START)
15270 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
15271 +L3_VMALLOC_END = pud_index(VMALLOC_END)
15272 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
15273 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
15274
15275 .text
15276 __HEAD
15277 @@ -85,35 +93,23 @@ startup_64:
15278 */
15279 addq %rbp, init_level4_pgt + 0(%rip)
15280 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
15281 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
15282 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
15283 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
15284 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
15285
15286 addq %rbp, level3_ident_pgt + 0(%rip)
15287 +#ifndef CONFIG_XEN
15288 + addq %rbp, level3_ident_pgt + 8(%rip)
15289 +#endif
15290
15291 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
15292 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
15293 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
15294 +
15295 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
15296 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
15297
15298 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15299 -
15300 - /* Add an Identity mapping if I am above 1G */
15301 - leaq _text(%rip), %rdi
15302 - andq $PMD_PAGE_MASK, %rdi
15303 -
15304 - movq %rdi, %rax
15305 - shrq $PUD_SHIFT, %rax
15306 - andq $(PTRS_PER_PUD - 1), %rax
15307 - jz ident_complete
15308 -
15309 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15310 - leaq level3_ident_pgt(%rip), %rbx
15311 - movq %rdx, 0(%rbx, %rax, 8)
15312 -
15313 - movq %rdi, %rax
15314 - shrq $PMD_SHIFT, %rax
15315 - andq $(PTRS_PER_PMD - 1), %rax
15316 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15317 - leaq level2_spare_pgt(%rip), %rbx
15318 - movq %rdx, 0(%rbx, %rax, 8)
15319 -ident_complete:
15320 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15321
15322 /*
15323 * Fixup the kernel text+data virtual addresses. Note that
15324 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
15325 * after the boot processor executes this code.
15326 */
15327
15328 - /* Enable PAE mode and PGE */
15329 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15330 + /* Enable PAE mode and PSE/PGE */
15331 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15332 movq %rax, %cr4
15333
15334 /* Setup early boot stage 4 level pagetables. */
15335 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
15336 movl $MSR_EFER, %ecx
15337 rdmsr
15338 btsl $_EFER_SCE, %eax /* Enable System Call */
15339 - btl $20,%edi /* No Execute supported? */
15340 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15341 jnc 1f
15342 btsl $_EFER_NX, %eax
15343 + leaq init_level4_pgt(%rip), %rdi
15344 +#ifndef CONFIG_EFI
15345 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15346 +#endif
15347 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15348 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15349 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15350 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15351 1: wrmsr /* Make changes effective */
15352
15353 /* Setup cr0 */
15354 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15355 * jump. In addition we need to ensure %cs is set so we make this
15356 * a far return.
15357 */
15358 + pax_set_fptr_mask
15359 movq initial_code(%rip),%rax
15360 pushq $0 # fake return address to stop unwinder
15361 pushq $__KERNEL_CS # set correct cs
15362 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15363 bad_address:
15364 jmp bad_address
15365
15366 - .section ".init.text","ax"
15367 + __INIT
15368 #ifdef CONFIG_EARLY_PRINTK
15369 .globl early_idt_handlers
15370 early_idt_handlers:
15371 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15372 #endif /* EARLY_PRINTK */
15373 1: hlt
15374 jmp 1b
15375 + .previous
15376
15377 #ifdef CONFIG_EARLY_PRINTK
15378 + __INITDATA
15379 early_recursion_flag:
15380 .long 0
15381 + .previous
15382
15383 + .section .rodata,"a",@progbits
15384 early_idt_msg:
15385 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15386 early_idt_ripmsg:
15387 .asciz "RIP %s\n"
15388 + .previous
15389 #endif /* CONFIG_EARLY_PRINTK */
15390 - .previous
15391
15392 + .section .rodata,"a",@progbits
15393 #define NEXT_PAGE(name) \
15394 .balign PAGE_SIZE; \
15395 ENTRY(name)
15396 @@ -338,7 +348,6 @@ ENTRY(name)
15397 i = i + 1 ; \
15398 .endr
15399
15400 - .data
15401 /*
15402 * This default setting generates an ident mapping at address 0x100000
15403 * and a mapping for the kernel that precisely maps virtual address
15404 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15405 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15406 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15407 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15408 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
15409 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15410 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
15411 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15412 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15413 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15414 .org init_level4_pgt + L4_START_KERNEL*8, 0
15415 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15416 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15417
15418 +#ifdef CONFIG_PAX_PER_CPU_PGD
15419 +NEXT_PAGE(cpu_pgd)
15420 + .rept NR_CPUS
15421 + .fill 512,8,0
15422 + .endr
15423 +#endif
15424 +
15425 NEXT_PAGE(level3_ident_pgt)
15426 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15427 +#ifdef CONFIG_XEN
15428 .fill 511,8,0
15429 +#else
15430 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15431 + .fill 510,8,0
15432 +#endif
15433 +
15434 +NEXT_PAGE(level3_vmalloc_start_pgt)
15435 + .fill 512,8,0
15436 +
15437 +NEXT_PAGE(level3_vmalloc_end_pgt)
15438 + .fill 512,8,0
15439 +
15440 +NEXT_PAGE(level3_vmemmap_pgt)
15441 + .fill L3_VMEMMAP_START,8,0
15442 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15443
15444 NEXT_PAGE(level3_kernel_pgt)
15445 .fill L3_START_KERNEL,8,0
15446 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15447 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15448 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15449
15450 +NEXT_PAGE(level2_vmemmap_pgt)
15451 + .fill 512,8,0
15452 +
15453 NEXT_PAGE(level2_fixmap_pgt)
15454 - .fill 506,8,0
15455 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15456 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15457 - .fill 5,8,0
15458 + .fill 507,8,0
15459 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15460 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15461 + .fill 4,8,0
15462
15463 -NEXT_PAGE(level1_fixmap_pgt)
15464 +NEXT_PAGE(level1_vsyscall_pgt)
15465 .fill 512,8,0
15466
15467 -NEXT_PAGE(level2_ident_pgt)
15468 - /* Since I easily can, map the first 1G.
15469 + /* Since I easily can, map the first 2G.
15470 * Don't set NX because code runs from these pages.
15471 */
15472 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15473 +NEXT_PAGE(level2_ident_pgt)
15474 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15475
15476 NEXT_PAGE(level2_kernel_pgt)
15477 /*
15478 @@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15479 * If you want to increase this then increase MODULES_VADDR
15480 * too.)
15481 */
15482 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15483 - KERNEL_IMAGE_SIZE/PMD_SIZE)
15484 -
15485 -NEXT_PAGE(level2_spare_pgt)
15486 - .fill 512, 8, 0
15487 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15488
15489 #undef PMDS
15490 #undef NEXT_PAGE
15491
15492 - .data
15493 + .align PAGE_SIZE
15494 +ENTRY(cpu_gdt_table)
15495 + .rept NR_CPUS
15496 + .quad 0x0000000000000000 /* NULL descriptor */
15497 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15498 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
15499 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
15500 + .quad 0x00cffb000000ffff /* __USER32_CS */
15501 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15502 + .quad 0x00affb000000ffff /* __USER_CS */
15503 +
15504 +#ifdef CONFIG_PAX_KERNEXEC
15505 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15506 +#else
15507 + .quad 0x0 /* unused */
15508 +#endif
15509 +
15510 + .quad 0,0 /* TSS */
15511 + .quad 0,0 /* LDT */
15512 + .quad 0,0,0 /* three TLS descriptors */
15513 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
15514 + /* asm/segment.h:GDT_ENTRIES must match this */
15515 +
15516 + /* zero the remaining page */
15517 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15518 + .endr
15519 +
15520 .align 16
15521 .globl early_gdt_descr
15522 early_gdt_descr:
15523 .word GDT_ENTRIES*8-1
15524 early_gdt_descr_base:
15525 - .quad INIT_PER_CPU_VAR(gdt_page)
15526 + .quad cpu_gdt_table
15527
15528 ENTRY(phys_base)
15529 /* This must match the first entry in level2_kernel_pgt */
15530 .quad 0x0000000000000000
15531
15532 #include "../../x86/xen/xen-head.S"
15533 -
15534 - .section .bss, "aw", @nobits
15535 +
15536 + .section .rodata,"a",@progbits
15537 .align L1_CACHE_BYTES
15538 ENTRY(idt_table)
15539 - .skip IDT_ENTRIES * 16
15540 + .fill 512,8,0
15541
15542 __PAGE_ALIGNED_BSS
15543 .align PAGE_SIZE
15544 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15545 index 9c3bd4a..e1d9b35 100644
15546 --- a/arch/x86/kernel/i386_ksyms_32.c
15547 +++ b/arch/x86/kernel/i386_ksyms_32.c
15548 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15549 EXPORT_SYMBOL(cmpxchg8b_emu);
15550 #endif
15551
15552 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15553 +
15554 /* Networking helper routines. */
15555 EXPORT_SYMBOL(csum_partial_copy_generic);
15556 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15557 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15558
15559 EXPORT_SYMBOL(__get_user_1);
15560 EXPORT_SYMBOL(__get_user_2);
15561 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15562
15563 EXPORT_SYMBOL(csum_partial);
15564 EXPORT_SYMBOL(empty_zero_page);
15565 +
15566 +#ifdef CONFIG_PAX_KERNEXEC
15567 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15568 +#endif
15569 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15570 index 6104852..6114160 100644
15571 --- a/arch/x86/kernel/i8259.c
15572 +++ b/arch/x86/kernel/i8259.c
15573 @@ -210,7 +210,7 @@ spurious_8259A_irq:
15574 "spurious 8259A interrupt: IRQ%d.\n", irq);
15575 spurious_irq_mask |= irqmask;
15576 }
15577 - atomic_inc(&irq_err_count);
15578 + atomic_inc_unchecked(&irq_err_count);
15579 /*
15580 * Theoretically we do not have to handle this IRQ,
15581 * but in Linux this does not cause problems and is
15582 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15583 index 43e9ccf..44ccf6f 100644
15584 --- a/arch/x86/kernel/init_task.c
15585 +++ b/arch/x86/kernel/init_task.c
15586 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15587 * way process stacks are handled. This is done by having a special
15588 * "init_task" linker map entry..
15589 */
15590 -union thread_union init_thread_union __init_task_data =
15591 - { INIT_THREAD_INFO(init_task) };
15592 +union thread_union init_thread_union __init_task_data;
15593
15594 /*
15595 * Initial task structure.
15596 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15597 * section. Since TSS's are completely CPU-local, we want them
15598 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15599 */
15600 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15601 -
15602 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15603 +EXPORT_SYMBOL(init_tss);
15604 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15605 index 8c96897..be66bfa 100644
15606 --- a/arch/x86/kernel/ioport.c
15607 +++ b/arch/x86/kernel/ioport.c
15608 @@ -6,6 +6,7 @@
15609 #include <linux/sched.h>
15610 #include <linux/kernel.h>
15611 #include <linux/capability.h>
15612 +#include <linux/security.h>
15613 #include <linux/errno.h>
15614 #include <linux/types.h>
15615 #include <linux/ioport.h>
15616 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15617
15618 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15619 return -EINVAL;
15620 +#ifdef CONFIG_GRKERNSEC_IO
15621 + if (turn_on && grsec_disable_privio) {
15622 + gr_handle_ioperm();
15623 + return -EPERM;
15624 + }
15625 +#endif
15626 if (turn_on && !capable(CAP_SYS_RAWIO))
15627 return -EPERM;
15628
15629 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15630 * because the ->io_bitmap_max value must match the bitmap
15631 * contents:
15632 */
15633 - tss = &per_cpu(init_tss, get_cpu());
15634 + tss = init_tss + get_cpu();
15635
15636 if (turn_on)
15637 bitmap_clear(t->io_bitmap_ptr, from, num);
15638 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15639 return -EINVAL;
15640 /* Trying to gain more privileges? */
15641 if (level > old) {
15642 +#ifdef CONFIG_GRKERNSEC_IO
15643 + if (grsec_disable_privio) {
15644 + gr_handle_iopl();
15645 + return -EPERM;
15646 + }
15647 +#endif
15648 if (!capable(CAP_SYS_RAWIO))
15649 return -EPERM;
15650 }
15651 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15652 index 429e0c9..17b3ece 100644
15653 --- a/arch/x86/kernel/irq.c
15654 +++ b/arch/x86/kernel/irq.c
15655 @@ -18,7 +18,7 @@
15656 #include <asm/mce.h>
15657 #include <asm/hw_irq.h>
15658
15659 -atomic_t irq_err_count;
15660 +atomic_unchecked_t irq_err_count;
15661
15662 /* Function pointer for generic interrupt vector handling */
15663 void (*x86_platform_ipi_callback)(void) = NULL;
15664 @@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15665 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15666 seq_printf(p, " Machine check polls\n");
15667 #endif
15668 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15669 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15670 #if defined(CONFIG_X86_IO_APIC)
15671 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15672 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15673 #endif
15674 return 0;
15675 }
15676 @@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15677
15678 u64 arch_irq_stat(void)
15679 {
15680 - u64 sum = atomic_read(&irq_err_count);
15681 + u64 sum = atomic_read_unchecked(&irq_err_count);
15682
15683 #ifdef CONFIG_X86_IO_APIC
15684 - sum += atomic_read(&irq_mis_count);
15685 + sum += atomic_read_unchecked(&irq_mis_count);
15686 #endif
15687 return sum;
15688 }
15689 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15690 index 7209070..cbcd71a 100644
15691 --- a/arch/x86/kernel/irq_32.c
15692 +++ b/arch/x86/kernel/irq_32.c
15693 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15694 __asm__ __volatile__("andl %%esp,%0" :
15695 "=r" (sp) : "0" (THREAD_SIZE - 1));
15696
15697 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15698 + return sp < STACK_WARN;
15699 }
15700
15701 static void print_stack_overflow(void)
15702 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15703 * per-CPU IRQ handling contexts (thread information and stack)
15704 */
15705 union irq_ctx {
15706 - struct thread_info tinfo;
15707 - u32 stack[THREAD_SIZE/sizeof(u32)];
15708 + unsigned long previous_esp;
15709 + u32 stack[THREAD_SIZE/sizeof(u32)];
15710 } __attribute__((aligned(THREAD_SIZE)));
15711
15712 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15713 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15714 static inline int
15715 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15716 {
15717 - union irq_ctx *curctx, *irqctx;
15718 + union irq_ctx *irqctx;
15719 u32 *isp, arg1, arg2;
15720
15721 - curctx = (union irq_ctx *) current_thread_info();
15722 irqctx = __this_cpu_read(hardirq_ctx);
15723
15724 /*
15725 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15726 * handler) we can't do that and just have to keep using the
15727 * current stack (which is the irq stack already after all)
15728 */
15729 - if (unlikely(curctx == irqctx))
15730 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15731 return 0;
15732
15733 /* build the stack frame on the IRQ stack */
15734 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15735 - irqctx->tinfo.task = curctx->tinfo.task;
15736 - irqctx->tinfo.previous_esp = current_stack_pointer;
15737 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15738 + irqctx->previous_esp = current_stack_pointer;
15739
15740 - /*
15741 - * Copy the softirq bits in preempt_count so that the
15742 - * softirq checks work in the hardirq context.
15743 - */
15744 - irqctx->tinfo.preempt_count =
15745 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15746 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15747 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15748 + __set_fs(MAKE_MM_SEG(0));
15749 +#endif
15750
15751 if (unlikely(overflow))
15752 call_on_stack(print_stack_overflow, isp);
15753 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15754 : "0" (irq), "1" (desc), "2" (isp),
15755 "D" (desc->handle_irq)
15756 : "memory", "cc", "ecx");
15757 +
15758 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15759 + __set_fs(current_thread_info()->addr_limit);
15760 +#endif
15761 +
15762 return 1;
15763 }
15764
15765 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15766 */
15767 void __cpuinit irq_ctx_init(int cpu)
15768 {
15769 - union irq_ctx *irqctx;
15770 -
15771 if (per_cpu(hardirq_ctx, cpu))
15772 return;
15773
15774 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15775 - THREAD_FLAGS,
15776 - THREAD_ORDER));
15777 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15778 - irqctx->tinfo.cpu = cpu;
15779 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15780 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15781 -
15782 - per_cpu(hardirq_ctx, cpu) = irqctx;
15783 -
15784 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15785 - THREAD_FLAGS,
15786 - THREAD_ORDER));
15787 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15788 - irqctx->tinfo.cpu = cpu;
15789 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15790 -
15791 - per_cpu(softirq_ctx, cpu) = irqctx;
15792 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15793 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15794
15795 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15796 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15797 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15798 asmlinkage void do_softirq(void)
15799 {
15800 unsigned long flags;
15801 - struct thread_info *curctx;
15802 union irq_ctx *irqctx;
15803 u32 *isp;
15804
15805 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15806 local_irq_save(flags);
15807
15808 if (local_softirq_pending()) {
15809 - curctx = current_thread_info();
15810 irqctx = __this_cpu_read(softirq_ctx);
15811 - irqctx->tinfo.task = curctx->task;
15812 - irqctx->tinfo.previous_esp = current_stack_pointer;
15813 + irqctx->previous_esp = current_stack_pointer;
15814
15815 /* build the stack frame on the softirq stack */
15816 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15817 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15818 +
15819 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15820 + __set_fs(MAKE_MM_SEG(0));
15821 +#endif
15822
15823 call_on_stack(__do_softirq, isp);
15824 +
15825 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15826 + __set_fs(current_thread_info()->addr_limit);
15827 +#endif
15828 +
15829 /*
15830 * Shouldn't happen, we returned above if in_interrupt():
15831 */
15832 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
15833 index 69bca46..0bac999 100644
15834 --- a/arch/x86/kernel/irq_64.c
15835 +++ b/arch/x86/kernel/irq_64.c
15836 @@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
15837 #ifdef CONFIG_DEBUG_STACKOVERFLOW
15838 u64 curbase = (u64)task_stack_page(current);
15839
15840 - if (user_mode_vm(regs))
15841 + if (user_mode(regs))
15842 return;
15843
15844 WARN_ONCE(regs->sp >= curbase &&
15845 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15846 index faba577..93b9e71 100644
15847 --- a/arch/x86/kernel/kgdb.c
15848 +++ b/arch/x86/kernel/kgdb.c
15849 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15850 #ifdef CONFIG_X86_32
15851 switch (regno) {
15852 case GDB_SS:
15853 - if (!user_mode_vm(regs))
15854 + if (!user_mode(regs))
15855 *(unsigned long *)mem = __KERNEL_DS;
15856 break;
15857 case GDB_SP:
15858 - if (!user_mode_vm(regs))
15859 + if (!user_mode(regs))
15860 *(unsigned long *)mem = kernel_stack_pointer(regs);
15861 break;
15862 case GDB_GS:
15863 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15864 case 'k':
15865 /* clear the trace bit */
15866 linux_regs->flags &= ~X86_EFLAGS_TF;
15867 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15868 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15869
15870 /* set the trace bit if we're stepping */
15871 if (remcomInBuffer[0] == 's') {
15872 linux_regs->flags |= X86_EFLAGS_TF;
15873 - atomic_set(&kgdb_cpu_doing_single_step,
15874 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15875 raw_smp_processor_id());
15876 }
15877
15878 @@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15879
15880 switch (cmd) {
15881 case DIE_DEBUG:
15882 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15883 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15884 if (user_mode(regs))
15885 return single_step_cont(regs, args);
15886 break;
15887 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15888 index 7da647d..56fe348 100644
15889 --- a/arch/x86/kernel/kprobes.c
15890 +++ b/arch/x86/kernel/kprobes.c
15891 @@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15892 } __attribute__((packed)) *insn;
15893
15894 insn = (struct __arch_relative_insn *)from;
15895 +
15896 + pax_open_kernel();
15897 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15898 insn->op = op;
15899 + pax_close_kernel();
15900 }
15901
15902 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15903 @@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15904 kprobe_opcode_t opcode;
15905 kprobe_opcode_t *orig_opcodes = opcodes;
15906
15907 - if (search_exception_tables((unsigned long)opcodes))
15908 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15909 return 0; /* Page fault may occur on this address. */
15910
15911 retry:
15912 @@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15913 }
15914 }
15915 insn_get_length(&insn);
15916 + pax_open_kernel();
15917 memcpy(dest, insn.kaddr, insn.length);
15918 + pax_close_kernel();
15919
15920 #ifdef CONFIG_X86_64
15921 if (insn_rip_relative(&insn)) {
15922 @@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15923 (u8 *) dest;
15924 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15925 disp = (u8 *) dest + insn_offset_displacement(&insn);
15926 + pax_open_kernel();
15927 *(s32 *) disp = (s32) newdisp;
15928 + pax_close_kernel();
15929 }
15930 #endif
15931 return insn.length;
15932 @@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15933 */
15934 __copy_instruction(p->ainsn.insn, p->addr, 0);
15935
15936 - if (can_boost(p->addr))
15937 + if (can_boost(ktla_ktva(p->addr)))
15938 p->ainsn.boostable = 0;
15939 else
15940 p->ainsn.boostable = -1;
15941
15942 - p->opcode = *p->addr;
15943 + p->opcode = *(ktla_ktva(p->addr));
15944 }
15945
15946 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15947 @@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15948 * nor set current_kprobe, because it doesn't use single
15949 * stepping.
15950 */
15951 - regs->ip = (unsigned long)p->ainsn.insn;
15952 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15953 preempt_enable_no_resched();
15954 return;
15955 }
15956 @@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15957 if (p->opcode == BREAKPOINT_INSTRUCTION)
15958 regs->ip = (unsigned long)p->addr;
15959 else
15960 - regs->ip = (unsigned long)p->ainsn.insn;
15961 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15962 }
15963
15964 /*
15965 @@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15966 setup_singlestep(p, regs, kcb, 0);
15967 return 1;
15968 }
15969 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
15970 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15971 /*
15972 * The breakpoint instruction was removed right
15973 * after we hit it. Another cpu has removed
15974 @@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15975 " movq %rax, 152(%rsp)\n"
15976 RESTORE_REGS_STRING
15977 " popfq\n"
15978 +#ifdef KERNEXEC_PLUGIN
15979 + " btsq $63,(%rsp)\n"
15980 +#endif
15981 #else
15982 " pushf\n"
15983 SAVE_REGS_STRING
15984 @@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15985 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15986 {
15987 unsigned long *tos = stack_addr(regs);
15988 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15989 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15990 unsigned long orig_ip = (unsigned long)p->addr;
15991 kprobe_opcode_t *insn = p->ainsn.insn;
15992
15993 @@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15994 struct die_args *args = data;
15995 int ret = NOTIFY_DONE;
15996
15997 - if (args->regs && user_mode_vm(args->regs))
15998 + if (args->regs && user_mode(args->regs))
15999 return ret;
16000
16001 switch (val) {
16002 @@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
16003 * Verify if the address gap is in 2GB range, because this uses
16004 * a relative jump.
16005 */
16006 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
16007 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
16008 if (abs(rel) > 0x7fffffff)
16009 return -ERANGE;
16010
16011 @@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
16012 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
16013
16014 /* Set probe function call */
16015 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
16016 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
16017
16018 /* Set returning jmp instruction at the tail of out-of-line buffer */
16019 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
16020 - (u8 *)op->kp.addr + op->optinsn.size);
16021 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
16022
16023 flush_icache_range((unsigned long) buf,
16024 (unsigned long) buf + TMPL_END_IDX +
16025 @@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
16026 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
16027
16028 /* Backup instructions which will be replaced by jump address */
16029 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
16030 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
16031 RELATIVE_ADDR_SIZE);
16032
16033 insn_buf[0] = RELATIVEJUMP_OPCODE;
16034 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
16035 index a9c2116..a52d4fc 100644
16036 --- a/arch/x86/kernel/kvm.c
16037 +++ b/arch/x86/kernel/kvm.c
16038 @@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
16039 pv_mmu_ops.set_pud = kvm_set_pud;
16040 #if PAGETABLE_LEVELS == 4
16041 pv_mmu_ops.set_pgd = kvm_set_pgd;
16042 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
16043 #endif
16044 #endif
16045 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
16046 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
16047 index ea69726..604d066 100644
16048 --- a/arch/x86/kernel/ldt.c
16049 +++ b/arch/x86/kernel/ldt.c
16050 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
16051 if (reload) {
16052 #ifdef CONFIG_SMP
16053 preempt_disable();
16054 - load_LDT(pc);
16055 + load_LDT_nolock(pc);
16056 if (!cpumask_equal(mm_cpumask(current->mm),
16057 cpumask_of(smp_processor_id())))
16058 smp_call_function(flush_ldt, current->mm, 1);
16059 preempt_enable();
16060 #else
16061 - load_LDT(pc);
16062 + load_LDT_nolock(pc);
16063 #endif
16064 }
16065 if (oldsize) {
16066 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
16067 return err;
16068
16069 for (i = 0; i < old->size; i++)
16070 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
16071 + write_ldt_entry(new->ldt, i, old->ldt + i);
16072 return 0;
16073 }
16074
16075 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
16076 retval = copy_ldt(&mm->context, &old_mm->context);
16077 mutex_unlock(&old_mm->context.lock);
16078 }
16079 +
16080 + if (tsk == current) {
16081 + mm->context.vdso = 0;
16082 +
16083 +#ifdef CONFIG_X86_32
16084 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
16085 + mm->context.user_cs_base = 0UL;
16086 + mm->context.user_cs_limit = ~0UL;
16087 +
16088 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16089 + cpus_clear(mm->context.cpu_user_cs_mask);
16090 +#endif
16091 +
16092 +#endif
16093 +#endif
16094 +
16095 + }
16096 +
16097 return retval;
16098 }
16099
16100 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
16101 }
16102 }
16103
16104 +#ifdef CONFIG_PAX_SEGMEXEC
16105 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
16106 + error = -EINVAL;
16107 + goto out_unlock;
16108 + }
16109 +#endif
16110 +
16111 fill_ldt(&ldt, &ldt_info);
16112 if (oldmode)
16113 ldt.avl = 0;
16114 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
16115 index a3fa43b..8966f4c 100644
16116 --- a/arch/x86/kernel/machine_kexec_32.c
16117 +++ b/arch/x86/kernel/machine_kexec_32.c
16118 @@ -27,7 +27,7 @@
16119 #include <asm/cacheflush.h>
16120 #include <asm/debugreg.h>
16121
16122 -static void set_idt(void *newidt, __u16 limit)
16123 +static void set_idt(struct desc_struct *newidt, __u16 limit)
16124 {
16125 struct desc_ptr curidt;
16126
16127 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
16128 }
16129
16130
16131 -static void set_gdt(void *newgdt, __u16 limit)
16132 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
16133 {
16134 struct desc_ptr curgdt;
16135
16136 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
16137 }
16138
16139 control_page = page_address(image->control_code_page);
16140 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
16141 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
16142
16143 relocate_kernel_ptr = control_page;
16144 page_list[PA_CONTROL_PAGE] = __pa(control_page);
16145 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
16146 index 3ca42d0..7cff8cc 100644
16147 --- a/arch/x86/kernel/microcode_intel.c
16148 +++ b/arch/x86/kernel/microcode_intel.c
16149 @@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
16150
16151 static int get_ucode_user(void *to, const void *from, size_t n)
16152 {
16153 - return copy_from_user(to, from, n);
16154 + return copy_from_user(to, (const void __force_user *)from, n);
16155 }
16156
16157 static enum ucode_state
16158 request_microcode_user(int cpu, const void __user *buf, size_t size)
16159 {
16160 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
16161 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
16162 }
16163
16164 static void microcode_fini_cpu(int cpu)
16165 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
16166 index 925179f..267ac7a 100644
16167 --- a/arch/x86/kernel/module.c
16168 +++ b/arch/x86/kernel/module.c
16169 @@ -36,15 +36,60 @@
16170 #define DEBUGP(fmt...)
16171 #endif
16172
16173 -void *module_alloc(unsigned long size)
16174 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
16175 {
16176 - if (PAGE_ALIGN(size) > MODULES_LEN)
16177 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
16178 return NULL;
16179 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
16180 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
16181 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
16182 -1, __builtin_return_address(0));
16183 }
16184
16185 +void *module_alloc(unsigned long size)
16186 +{
16187 +
16188 +#ifdef CONFIG_PAX_KERNEXEC
16189 + return __module_alloc(size, PAGE_KERNEL);
16190 +#else
16191 + return __module_alloc(size, PAGE_KERNEL_EXEC);
16192 +#endif
16193 +
16194 +}
16195 +
16196 +#ifdef CONFIG_PAX_KERNEXEC
16197 +#ifdef CONFIG_X86_32
16198 +void *module_alloc_exec(unsigned long size)
16199 +{
16200 + struct vm_struct *area;
16201 +
16202 + if (size == 0)
16203 + return NULL;
16204 +
16205 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
16206 + return area ? area->addr : NULL;
16207 +}
16208 +EXPORT_SYMBOL(module_alloc_exec);
16209 +
16210 +void module_free_exec(struct module *mod, void *module_region)
16211 +{
16212 + vunmap(module_region);
16213 +}
16214 +EXPORT_SYMBOL(module_free_exec);
16215 +#else
16216 +void module_free_exec(struct module *mod, void *module_region)
16217 +{
16218 + module_free(mod, module_region);
16219 +}
16220 +EXPORT_SYMBOL(module_free_exec);
16221 +
16222 +void *module_alloc_exec(unsigned long size)
16223 +{
16224 + return __module_alloc(size, PAGE_KERNEL_RX);
16225 +}
16226 +EXPORT_SYMBOL(module_alloc_exec);
16227 +#endif
16228 +#endif
16229 +
16230 #ifdef CONFIG_X86_32
16231 int apply_relocate(Elf32_Shdr *sechdrs,
16232 const char *strtab,
16233 @@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
16234 unsigned int i;
16235 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
16236 Elf32_Sym *sym;
16237 - uint32_t *location;
16238 + uint32_t *plocation, location;
16239
16240 DEBUGP("Applying relocate section %u to %u\n", relsec,
16241 sechdrs[relsec].sh_info);
16242 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
16243 /* This is where to make the change */
16244 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
16245 - + rel[i].r_offset;
16246 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
16247 + location = (uint32_t)plocation;
16248 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
16249 + plocation = ktla_ktva((void *)plocation);
16250 /* This is the symbol it is referring to. Note that all
16251 undefined symbols have been resolved. */
16252 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
16253 @@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
16254 switch (ELF32_R_TYPE(rel[i].r_info)) {
16255 case R_386_32:
16256 /* We add the value into the location given */
16257 - *location += sym->st_value;
16258 + pax_open_kernel();
16259 + *plocation += sym->st_value;
16260 + pax_close_kernel();
16261 break;
16262 case R_386_PC32:
16263 /* Add the value, subtract its postition */
16264 - *location += sym->st_value - (uint32_t)location;
16265 + pax_open_kernel();
16266 + *plocation += sym->st_value - location;
16267 + pax_close_kernel();
16268 break;
16269 default:
16270 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
16271 @@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
16272 case R_X86_64_NONE:
16273 break;
16274 case R_X86_64_64:
16275 + pax_open_kernel();
16276 *(u64 *)loc = val;
16277 + pax_close_kernel();
16278 break;
16279 case R_X86_64_32:
16280 + pax_open_kernel();
16281 *(u32 *)loc = val;
16282 + pax_close_kernel();
16283 if (val != *(u32 *)loc)
16284 goto overflow;
16285 break;
16286 case R_X86_64_32S:
16287 + pax_open_kernel();
16288 *(s32 *)loc = val;
16289 + pax_close_kernel();
16290 if ((s64)val != *(s32 *)loc)
16291 goto overflow;
16292 break;
16293 case R_X86_64_PC32:
16294 val -= (u64)loc;
16295 + pax_open_kernel();
16296 *(u32 *)loc = val;
16297 + pax_close_kernel();
16298 +
16299 #if 0
16300 if ((s64)val != *(s32 *)loc)
16301 goto overflow;
16302 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
16303 index e88f37b..1353db6 100644
16304 --- a/arch/x86/kernel/nmi.c
16305 +++ b/arch/x86/kernel/nmi.c
16306 @@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
16307 dotraplinkage notrace __kprobes void
16308 do_nmi(struct pt_regs *regs, long error_code)
16309 {
16310 +
16311 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16312 + if (!user_mode(regs)) {
16313 + unsigned long cs = regs->cs & 0xFFFF;
16314 + unsigned long ip = ktva_ktla(regs->ip);
16315 +
16316 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16317 + regs->ip = ip;
16318 + }
16319 +#endif
16320 +
16321 nmi_enter();
16322
16323 inc_irq_stat(__nmi_count);
16324 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
16325 index 676b8c7..870ba04 100644
16326 --- a/arch/x86/kernel/paravirt-spinlocks.c
16327 +++ b/arch/x86/kernel/paravirt-spinlocks.c
16328 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
16329 arch_spin_lock(lock);
16330 }
16331
16332 -struct pv_lock_ops pv_lock_ops = {
16333 +struct pv_lock_ops pv_lock_ops __read_only = {
16334 #ifdef CONFIG_SMP
16335 .spin_is_locked = __ticket_spin_is_locked,
16336 .spin_is_contended = __ticket_spin_is_contended,
16337 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
16338 index d90272e..6bb013b 100644
16339 --- a/arch/x86/kernel/paravirt.c
16340 +++ b/arch/x86/kernel/paravirt.c
16341 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
16342 {
16343 return x;
16344 }
16345 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16346 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
16347 +#endif
16348
16349 void __init default_banner(void)
16350 {
16351 @@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
16352 if (opfunc == NULL)
16353 /* If there's no function, patch it with a ud2a (BUG) */
16354 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16355 - else if (opfunc == _paravirt_nop)
16356 + else if (opfunc == (void *)_paravirt_nop)
16357 /* If the operation is a nop, then nop the callsite */
16358 ret = paravirt_patch_nop();
16359
16360 /* identity functions just return their single argument */
16361 - else if (opfunc == _paravirt_ident_32)
16362 + else if (opfunc == (void *)_paravirt_ident_32)
16363 ret = paravirt_patch_ident_32(insnbuf, len);
16364 - else if (opfunc == _paravirt_ident_64)
16365 + else if (opfunc == (void *)_paravirt_ident_64)
16366 ret = paravirt_patch_ident_64(insnbuf, len);
16367 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16368 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16369 + ret = paravirt_patch_ident_64(insnbuf, len);
16370 +#endif
16371
16372 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16373 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16374 @@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16375 if (insn_len > len || start == NULL)
16376 insn_len = len;
16377 else
16378 - memcpy(insnbuf, start, insn_len);
16379 + memcpy(insnbuf, ktla_ktva(start), insn_len);
16380
16381 return insn_len;
16382 }
16383 @@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
16384 preempt_enable();
16385 }
16386
16387 -struct pv_info pv_info = {
16388 +struct pv_info pv_info __read_only = {
16389 .name = "bare hardware",
16390 .paravirt_enabled = 0,
16391 .kernel_rpl = 0,
16392 @@ -313,16 +320,16 @@ struct pv_info pv_info = {
16393 #endif
16394 };
16395
16396 -struct pv_init_ops pv_init_ops = {
16397 +struct pv_init_ops pv_init_ops __read_only = {
16398 .patch = native_patch,
16399 };
16400
16401 -struct pv_time_ops pv_time_ops = {
16402 +struct pv_time_ops pv_time_ops __read_only = {
16403 .sched_clock = native_sched_clock,
16404 .steal_clock = native_steal_clock,
16405 };
16406
16407 -struct pv_irq_ops pv_irq_ops = {
16408 +struct pv_irq_ops pv_irq_ops __read_only = {
16409 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16410 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16411 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16412 @@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
16413 #endif
16414 };
16415
16416 -struct pv_cpu_ops pv_cpu_ops = {
16417 +struct pv_cpu_ops pv_cpu_ops __read_only = {
16418 .cpuid = native_cpuid,
16419 .get_debugreg = native_get_debugreg,
16420 .set_debugreg = native_set_debugreg,
16421 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16422 .end_context_switch = paravirt_nop,
16423 };
16424
16425 -struct pv_apic_ops pv_apic_ops = {
16426 +struct pv_apic_ops pv_apic_ops __read_only = {
16427 #ifdef CONFIG_X86_LOCAL_APIC
16428 .startup_ipi_hook = paravirt_nop,
16429 #endif
16430 };
16431
16432 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16433 +#ifdef CONFIG_X86_32
16434 +#ifdef CONFIG_X86_PAE
16435 +/* 64-bit pagetable entries */
16436 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16437 +#else
16438 /* 32-bit pagetable entries */
16439 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16440 +#endif
16441 #else
16442 /* 64-bit pagetable entries */
16443 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16444 #endif
16445
16446 -struct pv_mmu_ops pv_mmu_ops = {
16447 +struct pv_mmu_ops pv_mmu_ops __read_only = {
16448
16449 .read_cr2 = native_read_cr2,
16450 .write_cr2 = native_write_cr2,
16451 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16452 .make_pud = PTE_IDENT,
16453
16454 .set_pgd = native_set_pgd,
16455 + .set_pgd_batched = native_set_pgd_batched,
16456 #endif
16457 #endif /* PAGETABLE_LEVELS >= 3 */
16458
16459 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16460 },
16461
16462 .set_fixmap = native_set_fixmap,
16463 +
16464 +#ifdef CONFIG_PAX_KERNEXEC
16465 + .pax_open_kernel = native_pax_open_kernel,
16466 + .pax_close_kernel = native_pax_close_kernel,
16467 +#endif
16468 +
16469 };
16470
16471 EXPORT_SYMBOL_GPL(pv_time_ops);
16472 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16473 index 35ccf75..7a15747 100644
16474 --- a/arch/x86/kernel/pci-iommu_table.c
16475 +++ b/arch/x86/kernel/pci-iommu_table.c
16476 @@ -2,7 +2,7 @@
16477 #include <asm/iommu_table.h>
16478 #include <linux/string.h>
16479 #include <linux/kallsyms.h>
16480 -
16481 +#include <linux/sched.h>
16482
16483 #define DEBUG 1
16484
16485 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16486 index ee5d4fb..426649b 100644
16487 --- a/arch/x86/kernel/process.c
16488 +++ b/arch/x86/kernel/process.c
16489 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16490
16491 void free_thread_info(struct thread_info *ti)
16492 {
16493 - free_thread_xstate(ti->task);
16494 free_pages((unsigned long)ti, THREAD_ORDER);
16495 }
16496
16497 +static struct kmem_cache *task_struct_cachep;
16498 +
16499 void arch_task_cache_init(void)
16500 {
16501 - task_xstate_cachep =
16502 - kmem_cache_create("task_xstate", xstate_size,
16503 + /* create a slab on which task_structs can be allocated */
16504 + task_struct_cachep =
16505 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16506 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16507 +
16508 + task_xstate_cachep =
16509 + kmem_cache_create("task_xstate", xstate_size,
16510 __alignof__(union thread_xstate),
16511 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16512 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16513 +}
16514 +
16515 +struct task_struct *alloc_task_struct_node(int node)
16516 +{
16517 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16518 +}
16519 +
16520 +void free_task_struct(struct task_struct *task)
16521 +{
16522 + free_thread_xstate(task);
16523 + kmem_cache_free(task_struct_cachep, task);
16524 }
16525
16526 /*
16527 @@ -70,7 +87,7 @@ void exit_thread(void)
16528 unsigned long *bp = t->io_bitmap_ptr;
16529
16530 if (bp) {
16531 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16532 + struct tss_struct *tss = init_tss + get_cpu();
16533
16534 t->io_bitmap_ptr = NULL;
16535 clear_thread_flag(TIF_IO_BITMAP);
16536 @@ -106,7 +123,7 @@ void show_regs_common(void)
16537
16538 printk(KERN_CONT "\n");
16539 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16540 - current->pid, current->comm, print_tainted(),
16541 + task_pid_nr(current), current->comm, print_tainted(),
16542 init_utsname()->release,
16543 (int)strcspn(init_utsname()->version, " "),
16544 init_utsname()->version);
16545 @@ -120,6 +137,9 @@ void flush_thread(void)
16546 {
16547 struct task_struct *tsk = current;
16548
16549 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16550 + loadsegment(gs, 0);
16551 +#endif
16552 flush_ptrace_hw_breakpoint(tsk);
16553 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16554 /*
16555 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16556 regs.di = (unsigned long) arg;
16557
16558 #ifdef CONFIG_X86_32
16559 - regs.ds = __USER_DS;
16560 - regs.es = __USER_DS;
16561 + regs.ds = __KERNEL_DS;
16562 + regs.es = __KERNEL_DS;
16563 regs.fs = __KERNEL_PERCPU;
16564 - regs.gs = __KERNEL_STACK_CANARY;
16565 + savesegment(gs, regs.gs);
16566 #else
16567 regs.ss = __KERNEL_DS;
16568 #endif
16569 @@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
16570
16571 return ret;
16572 }
16573 -void stop_this_cpu(void *dummy)
16574 +__noreturn void stop_this_cpu(void *dummy)
16575 {
16576 local_irq_disable();
16577 /*
16578 @@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
16579 }
16580 early_param("idle", idle_setup);
16581
16582 -unsigned long arch_align_stack(unsigned long sp)
16583 +#ifdef CONFIG_PAX_RANDKSTACK
16584 +void pax_randomize_kstack(struct pt_regs *regs)
16585 {
16586 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16587 - sp -= get_random_int() % 8192;
16588 - return sp & ~0xf;
16589 -}
16590 + struct thread_struct *thread = &current->thread;
16591 + unsigned long time;
16592
16593 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16594 -{
16595 - unsigned long range_end = mm->brk + 0x02000000;
16596 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16597 -}
16598 + if (!randomize_va_space)
16599 + return;
16600 +
16601 + if (v8086_mode(regs))
16602 + return;
16603
16604 + rdtscl(time);
16605 +
16606 + /* P4 seems to return a 0 LSB, ignore it */
16607 +#ifdef CONFIG_MPENTIUM4
16608 + time &= 0x3EUL;
16609 + time <<= 2;
16610 +#elif defined(CONFIG_X86_64)
16611 + time &= 0xFUL;
16612 + time <<= 4;
16613 +#else
16614 + time &= 0x1FUL;
16615 + time <<= 3;
16616 +#endif
16617 +
16618 + thread->sp0 ^= time;
16619 + load_sp0(init_tss + smp_processor_id(), thread);
16620 +
16621 +#ifdef CONFIG_X86_64
16622 + percpu_write(kernel_stack, thread->sp0);
16623 +#endif
16624 +}
16625 +#endif
16626 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16627 index 795b79f..063767a 100644
16628 --- a/arch/x86/kernel/process_32.c
16629 +++ b/arch/x86/kernel/process_32.c
16630 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16631 unsigned long thread_saved_pc(struct task_struct *tsk)
16632 {
16633 return ((unsigned long *)tsk->thread.sp)[3];
16634 +//XXX return tsk->thread.eip;
16635 }
16636
16637 #ifndef CONFIG_SMP
16638 @@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
16639 unsigned long sp;
16640 unsigned short ss, gs;
16641
16642 - if (user_mode_vm(regs)) {
16643 + if (user_mode(regs)) {
16644 sp = regs->sp;
16645 ss = regs->ss & 0xffff;
16646 - gs = get_user_gs(regs);
16647 } else {
16648 sp = kernel_stack_pointer(regs);
16649 savesegment(ss, ss);
16650 - savesegment(gs, gs);
16651 }
16652 + gs = get_user_gs(regs);
16653
16654 show_regs_common();
16655
16656 @@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16657 struct task_struct *tsk;
16658 int err;
16659
16660 - childregs = task_pt_regs(p);
16661 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16662 *childregs = *regs;
16663 childregs->ax = 0;
16664 childregs->sp = sp;
16665
16666 p->thread.sp = (unsigned long) childregs;
16667 p->thread.sp0 = (unsigned long) (childregs+1);
16668 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16669
16670 p->thread.ip = (unsigned long) ret_from_fork;
16671
16672 @@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16673 struct thread_struct *prev = &prev_p->thread,
16674 *next = &next_p->thread;
16675 int cpu = smp_processor_id();
16676 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16677 + struct tss_struct *tss = init_tss + cpu;
16678 bool preload_fpu;
16679
16680 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16681 @@ -331,6 +332,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16682 */
16683 lazy_save_gs(prev->gs);
16684
16685 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16686 + __set_fs(task_thread_info(next_p)->addr_limit);
16687 +#endif
16688 +
16689 /*
16690 * Load the per-thread Thread-Local Storage descriptor.
16691 */
16692 @@ -366,6 +371,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16693 */
16694 arch_end_context_switch(next_p);
16695
16696 + percpu_write(current_task, next_p);
16697 + percpu_write(current_tinfo, &next_p->tinfo);
16698 +
16699 if (preload_fpu)
16700 __math_state_restore();
16701
16702 @@ -375,8 +383,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16703 if (prev->gs | next->gs)
16704 lazy_load_gs(next->gs);
16705
16706 - percpu_write(current_task, next_p);
16707 -
16708 return prev_p;
16709 }
16710
16711 @@ -406,4 +412,3 @@ unsigned long get_wchan(struct task_struct *p)
16712 } while (count++ < 16);
16713 return 0;
16714 }
16715 -
16716 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16717 index 3bd7e6e..90b2bcf 100644
16718 --- a/arch/x86/kernel/process_64.c
16719 +++ b/arch/x86/kernel/process_64.c
16720 @@ -89,7 +89,7 @@ static void __exit_idle(void)
16721 void exit_idle(void)
16722 {
16723 /* idle loop has pid 0 */
16724 - if (current->pid)
16725 + if (task_pid_nr(current))
16726 return;
16727 __exit_idle();
16728 }
16729 @@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16730 struct pt_regs *childregs;
16731 struct task_struct *me = current;
16732
16733 - childregs = ((struct pt_regs *)
16734 - (THREAD_SIZE + task_stack_page(p))) - 1;
16735 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16736 *childregs = *regs;
16737
16738 childregs->ax = 0;
16739 @@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16740 p->thread.sp = (unsigned long) childregs;
16741 p->thread.sp0 = (unsigned long) (childregs+1);
16742 p->thread.usersp = me->thread.usersp;
16743 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16744
16745 set_tsk_thread_flag(p, TIF_FORK);
16746
16747 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16748 struct thread_struct *prev = &prev_p->thread;
16749 struct thread_struct *next = &next_p->thread;
16750 int cpu = smp_processor_id();
16751 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16752 + struct tss_struct *tss = init_tss + cpu;
16753 unsigned fsindex, gsindex;
16754 bool preload_fpu;
16755
16756 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16757 prev->usersp = percpu_read(old_rsp);
16758 percpu_write(old_rsp, next->usersp);
16759 percpu_write(current_task, next_p);
16760 + percpu_write(current_tinfo, &next_p->tinfo);
16761
16762 - percpu_write(kernel_stack,
16763 - (unsigned long)task_stack_page(next_p) +
16764 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16765 + percpu_write(kernel_stack, next->sp0);
16766
16767 /*
16768 * Now maybe reload the debug registers and handle I/O bitmaps
16769 @@ -540,12 +539,11 @@ unsigned long get_wchan(struct task_struct *p)
16770 if (!p || p == current || p->state == TASK_RUNNING)
16771 return 0;
16772 stack = (unsigned long)task_stack_page(p);
16773 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16774 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16775 return 0;
16776 fp = *(u64 *)(p->thread.sp);
16777 do {
16778 - if (fp < (unsigned long)stack ||
16779 - fp >= (unsigned long)stack+THREAD_SIZE)
16780 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16781 return 0;
16782 ip = *(u64 *)(fp+8);
16783 if (!in_sched_functions(ip))
16784 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16785 index 8252879..d3219e0 100644
16786 --- a/arch/x86/kernel/ptrace.c
16787 +++ b/arch/x86/kernel/ptrace.c
16788 @@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16789 unsigned long addr, unsigned long data)
16790 {
16791 int ret;
16792 - unsigned long __user *datap = (unsigned long __user *)data;
16793 + unsigned long __user *datap = (__force unsigned long __user *)data;
16794
16795 switch (request) {
16796 /* read the word at location addr in the USER area. */
16797 @@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16798 if ((int) addr < 0)
16799 return -EIO;
16800 ret = do_get_thread_area(child, addr,
16801 - (struct user_desc __user *)data);
16802 + (__force struct user_desc __user *) data);
16803 break;
16804
16805 case PTRACE_SET_THREAD_AREA:
16806 if ((int) addr < 0)
16807 return -EIO;
16808 ret = do_set_thread_area(child, addr,
16809 - (struct user_desc __user *)data, 0);
16810 + (__force struct user_desc __user *) data, 0);
16811 break;
16812 #endif
16813
16814 @@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16815 memset(info, 0, sizeof(*info));
16816 info->si_signo = SIGTRAP;
16817 info->si_code = si_code;
16818 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16819 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16820 }
16821
16822 void user_single_step_siginfo(struct task_struct *tsk,
16823 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16824 index 42eb330..139955c 100644
16825 --- a/arch/x86/kernel/pvclock.c
16826 +++ b/arch/x86/kernel/pvclock.c
16827 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16828 return pv_tsc_khz;
16829 }
16830
16831 -static atomic64_t last_value = ATOMIC64_INIT(0);
16832 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16833
16834 void pvclock_resume(void)
16835 {
16836 - atomic64_set(&last_value, 0);
16837 + atomic64_set_unchecked(&last_value, 0);
16838 }
16839
16840 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16841 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16842 * updating at the same time, and one of them could be slightly behind,
16843 * making the assumption that last_value always go forward fail to hold.
16844 */
16845 - last = atomic64_read(&last_value);
16846 + last = atomic64_read_unchecked(&last_value);
16847 do {
16848 if (ret < last)
16849 return last;
16850 - last = atomic64_cmpxchg(&last_value, last, ret);
16851 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16852 } while (unlikely(last != ret));
16853
16854 return ret;
16855 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16856 index 37a458b..e63d183 100644
16857 --- a/arch/x86/kernel/reboot.c
16858 +++ b/arch/x86/kernel/reboot.c
16859 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16860 EXPORT_SYMBOL(pm_power_off);
16861
16862 static const struct desc_ptr no_idt = {};
16863 -static int reboot_mode;
16864 +static unsigned short reboot_mode;
16865 enum reboot_type reboot_type = BOOT_ACPI;
16866 int reboot_force;
16867
16868 @@ -324,13 +324,17 @@ core_initcall(reboot_init);
16869 extern const unsigned char machine_real_restart_asm[];
16870 extern const u64 machine_real_restart_gdt[3];
16871
16872 -void machine_real_restart(unsigned int type)
16873 +__noreturn void machine_real_restart(unsigned int type)
16874 {
16875 void *restart_va;
16876 unsigned long restart_pa;
16877 - void (*restart_lowmem)(unsigned int);
16878 + void (* __noreturn restart_lowmem)(unsigned int);
16879 u64 *lowmem_gdt;
16880
16881 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16882 + struct desc_struct *gdt;
16883 +#endif
16884 +
16885 local_irq_disable();
16886
16887 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16888 @@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16889 boot)". This seems like a fairly standard thing that gets set by
16890 REBOOT.COM programs, and the previous reset routine did this
16891 too. */
16892 - *((unsigned short *)0x472) = reboot_mode;
16893 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16894
16895 /* Patch the GDT in the low memory trampoline */
16896 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16897
16898 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16899 restart_pa = virt_to_phys(restart_va);
16900 - restart_lowmem = (void (*)(unsigned int))restart_pa;
16901 + restart_lowmem = (void *)restart_pa;
16902
16903 /* GDT[0]: GDT self-pointer */
16904 lowmem_gdt[0] =
16905 @@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16906 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16907
16908 /* Jump to the identity-mapped low memory code */
16909 +
16910 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16911 + gdt = get_cpu_gdt_table(smp_processor_id());
16912 + pax_open_kernel();
16913 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16914 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16915 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16916 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16917 +#endif
16918 +#ifdef CONFIG_PAX_KERNEXEC
16919 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16920 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16921 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16922 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16923 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16924 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16925 +#endif
16926 + pax_close_kernel();
16927 +#endif
16928 +
16929 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16930 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16931 + unreachable();
16932 +#else
16933 restart_lowmem(type);
16934 +#endif
16935 +
16936 }
16937 #ifdef CONFIG_APM_MODULE
16938 EXPORT_SYMBOL(machine_real_restart);
16939 @@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16940 * try to force a triple fault and then cycle between hitting the keyboard
16941 * controller and doing that
16942 */
16943 -static void native_machine_emergency_restart(void)
16944 +__noreturn static void native_machine_emergency_restart(void)
16945 {
16946 int i;
16947 int attempt = 0;
16948 @@ -664,13 +694,13 @@ void native_machine_shutdown(void)
16949 #endif
16950 }
16951
16952 -static void __machine_emergency_restart(int emergency)
16953 +static __noreturn void __machine_emergency_restart(int emergency)
16954 {
16955 reboot_emergency = emergency;
16956 machine_ops.emergency_restart();
16957 }
16958
16959 -static void native_machine_restart(char *__unused)
16960 +static __noreturn void native_machine_restart(char *__unused)
16961 {
16962 printk("machine restart\n");
16963
16964 @@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
16965 __machine_emergency_restart(0);
16966 }
16967
16968 -static void native_machine_halt(void)
16969 +static __noreturn void native_machine_halt(void)
16970 {
16971 /* stop other cpus and apics */
16972 machine_shutdown();
16973 @@ -690,7 +720,7 @@ static void native_machine_halt(void)
16974 stop_this_cpu(NULL);
16975 }
16976
16977 -static void native_machine_power_off(void)
16978 +__noreturn static void native_machine_power_off(void)
16979 {
16980 if (pm_power_off) {
16981 if (!reboot_force)
16982 @@ -699,6 +729,7 @@ static void native_machine_power_off(void)
16983 }
16984 /* a fallback in case there is no PM info available */
16985 tboot_shutdown(TB_SHUTDOWN_HALT);
16986 + unreachable();
16987 }
16988
16989 struct machine_ops machine_ops = {
16990 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16991 index 7a6f3b3..bed145d7 100644
16992 --- a/arch/x86/kernel/relocate_kernel_64.S
16993 +++ b/arch/x86/kernel/relocate_kernel_64.S
16994 @@ -11,6 +11,7 @@
16995 #include <asm/kexec.h>
16996 #include <asm/processor-flags.h>
16997 #include <asm/pgtable_types.h>
16998 +#include <asm/alternative-asm.h>
16999
17000 /*
17001 * Must be relocatable PIC code callable as a C function
17002 @@ -160,13 +161,14 @@ identity_mapped:
17003 xorq %rbp, %rbp
17004 xorq %r8, %r8
17005 xorq %r9, %r9
17006 - xorq %r10, %r9
17007 + xorq %r10, %r10
17008 xorq %r11, %r11
17009 xorq %r12, %r12
17010 xorq %r13, %r13
17011 xorq %r14, %r14
17012 xorq %r15, %r15
17013
17014 + pax_force_retaddr 0, 1
17015 ret
17016
17017 1:
17018 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
17019 index cf0ef98..e3f780b 100644
17020 --- a/arch/x86/kernel/setup.c
17021 +++ b/arch/x86/kernel/setup.c
17022 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
17023
17024 switch (data->type) {
17025 case SETUP_E820_EXT:
17026 - parse_e820_ext(data);
17027 + parse_e820_ext((struct setup_data __force_kernel *)data);
17028 break;
17029 case SETUP_DTB:
17030 add_dtb(pa_data);
17031 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
17032 * area (640->1Mb) as ram even though it is not.
17033 * take them out.
17034 */
17035 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
17036 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
17037 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
17038 }
17039
17040 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
17041
17042 if (!boot_params.hdr.root_flags)
17043 root_mountflags &= ~MS_RDONLY;
17044 - init_mm.start_code = (unsigned long) _text;
17045 - init_mm.end_code = (unsigned long) _etext;
17046 + init_mm.start_code = ktla_ktva((unsigned long) _text);
17047 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
17048 init_mm.end_data = (unsigned long) _edata;
17049 init_mm.brk = _brk_end;
17050
17051 - code_resource.start = virt_to_phys(_text);
17052 - code_resource.end = virt_to_phys(_etext)-1;
17053 - data_resource.start = virt_to_phys(_etext);
17054 + code_resource.start = virt_to_phys(ktla_ktva(_text));
17055 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
17056 + data_resource.start = virt_to_phys(_sdata);
17057 data_resource.end = virt_to_phys(_edata)-1;
17058 bss_resource.start = virt_to_phys(&__bss_start);
17059 bss_resource.end = virt_to_phys(&__bss_stop)-1;
17060 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
17061 index 71f4727..16dc9f7 100644
17062 --- a/arch/x86/kernel/setup_percpu.c
17063 +++ b/arch/x86/kernel/setup_percpu.c
17064 @@ -21,19 +21,17 @@
17065 #include <asm/cpu.h>
17066 #include <asm/stackprotector.h>
17067
17068 -DEFINE_PER_CPU(int, cpu_number);
17069 +#ifdef CONFIG_SMP
17070 +DEFINE_PER_CPU(unsigned int, cpu_number);
17071 EXPORT_PER_CPU_SYMBOL(cpu_number);
17072 +#endif
17073
17074 -#ifdef CONFIG_X86_64
17075 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
17076 -#else
17077 -#define BOOT_PERCPU_OFFSET 0
17078 -#endif
17079
17080 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
17081 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
17082
17083 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
17084 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
17085 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
17086 };
17087 EXPORT_SYMBOL(__per_cpu_offset);
17088 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
17089 {
17090 #ifdef CONFIG_X86_32
17091 struct desc_struct gdt;
17092 + unsigned long base = per_cpu_offset(cpu);
17093
17094 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
17095 - 0x2 | DESCTYPE_S, 0x8);
17096 - gdt.s = 1;
17097 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
17098 + 0x83 | DESCTYPE_S, 0xC);
17099 write_gdt_entry(get_cpu_gdt_table(cpu),
17100 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
17101 #endif
17102 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
17103 /* alrighty, percpu areas up and running */
17104 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
17105 for_each_possible_cpu(cpu) {
17106 +#ifdef CONFIG_CC_STACKPROTECTOR
17107 +#ifdef CONFIG_X86_32
17108 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
17109 +#endif
17110 +#endif
17111 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
17112 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
17113 per_cpu(cpu_number, cpu) = cpu;
17114 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
17115 */
17116 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
17117 #endif
17118 +#ifdef CONFIG_CC_STACKPROTECTOR
17119 +#ifdef CONFIG_X86_32
17120 + if (!cpu)
17121 + per_cpu(stack_canary.canary, cpu) = canary;
17122 +#endif
17123 +#endif
17124 /*
17125 * Up to this point, the boot CPU has been using .init.data
17126 * area. Reload any changed state for the boot CPU.
17127 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
17128 index 54ddaeb2..22c3bdc 100644
17129 --- a/arch/x86/kernel/signal.c
17130 +++ b/arch/x86/kernel/signal.c
17131 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
17132 * Align the stack pointer according to the i386 ABI,
17133 * i.e. so that on function entry ((sp + 4) & 15) == 0.
17134 */
17135 - sp = ((sp + 4) & -16ul) - 4;
17136 + sp = ((sp - 12) & -16ul) - 4;
17137 #else /* !CONFIG_X86_32 */
17138 sp = round_down(sp, 16) - 8;
17139 #endif
17140 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
17141 * Return an always-bogus address instead so we will die with SIGSEGV.
17142 */
17143 if (onsigstack && !likely(on_sig_stack(sp)))
17144 - return (void __user *)-1L;
17145 + return (__force void __user *)-1L;
17146
17147 /* save i387 state */
17148 if (used_math() && save_i387_xstate(*fpstate) < 0)
17149 - return (void __user *)-1L;
17150 + return (__force void __user *)-1L;
17151
17152 return (void __user *)sp;
17153 }
17154 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
17155 }
17156
17157 if (current->mm->context.vdso)
17158 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
17159 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
17160 else
17161 - restorer = &frame->retcode;
17162 + restorer = (void __user *)&frame->retcode;
17163 if (ka->sa.sa_flags & SA_RESTORER)
17164 restorer = ka->sa.sa_restorer;
17165
17166 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
17167 * reasons and because gdb uses it as a signature to notice
17168 * signal handler stack frames.
17169 */
17170 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
17171 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
17172
17173 if (err)
17174 return -EFAULT;
17175 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
17176 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
17177
17178 /* Set up to return from userspace. */
17179 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
17180 + if (current->mm->context.vdso)
17181 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
17182 + else
17183 + restorer = (void __user *)&frame->retcode;
17184 if (ka->sa.sa_flags & SA_RESTORER)
17185 restorer = ka->sa.sa_restorer;
17186 put_user_ex(restorer, &frame->pretcode);
17187 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
17188 * reasons and because gdb uses it as a signature to notice
17189 * signal handler stack frames.
17190 */
17191 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
17192 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
17193 } put_user_catch(err);
17194
17195 if (err)
17196 @@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
17197 * X86_32: vm86 regs switched out by assembly code before reaching
17198 * here, so testing against kernel CS suffices.
17199 */
17200 - if (!user_mode(regs))
17201 + if (!user_mode_novm(regs))
17202 return;
17203
17204 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
17205 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
17206 index 9f548cb..caf76f7 100644
17207 --- a/arch/x86/kernel/smpboot.c
17208 +++ b/arch/x86/kernel/smpboot.c
17209 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
17210 set_idle_for_cpu(cpu, c_idle.idle);
17211 do_rest:
17212 per_cpu(current_task, cpu) = c_idle.idle;
17213 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
17214 #ifdef CONFIG_X86_32
17215 /* Stack for startup_32 can be just as for start_secondary onwards */
17216 irq_ctx_init(cpu);
17217 #else
17218 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
17219 initial_gs = per_cpu_offset(cpu);
17220 - per_cpu(kernel_stack, cpu) =
17221 - (unsigned long)task_stack_page(c_idle.idle) -
17222 - KERNEL_STACK_OFFSET + THREAD_SIZE;
17223 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
17224 #endif
17225 +
17226 + pax_open_kernel();
17227 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
17228 + pax_close_kernel();
17229 +
17230 initial_code = (unsigned long)start_secondary;
17231 stack_start = c_idle.idle->thread.sp;
17232
17233 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
17234
17235 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
17236
17237 +#ifdef CONFIG_PAX_PER_CPU_PGD
17238 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
17239 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
17240 + KERNEL_PGD_PTRS);
17241 +#endif
17242 +
17243 err = do_boot_cpu(apicid, cpu);
17244 if (err) {
17245 pr_debug("do_boot_cpu failed %d\n", err);
17246 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
17247 index c346d11..d43b163 100644
17248 --- a/arch/x86/kernel/step.c
17249 +++ b/arch/x86/kernel/step.c
17250 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17251 struct desc_struct *desc;
17252 unsigned long base;
17253
17254 - seg &= ~7UL;
17255 + seg >>= 3;
17256
17257 mutex_lock(&child->mm->context.lock);
17258 - if (unlikely((seg >> 3) >= child->mm->context.size))
17259 + if (unlikely(seg >= child->mm->context.size))
17260 addr = -1L; /* bogus selector, access would fault */
17261 else {
17262 desc = child->mm->context.ldt + seg;
17263 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17264 addr += base;
17265 }
17266 mutex_unlock(&child->mm->context.lock);
17267 - }
17268 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
17269 + addr = ktla_ktva(addr);
17270
17271 return addr;
17272 }
17273 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
17274 unsigned char opcode[15];
17275 unsigned long addr = convert_ip_to_linear(child, regs);
17276
17277 + if (addr == -EINVAL)
17278 + return 0;
17279 +
17280 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
17281 for (i = 0; i < copied; i++) {
17282 switch (opcode[i]) {
17283 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
17284 index 0b0cb5f..db6b9ed 100644
17285 --- a/arch/x86/kernel/sys_i386_32.c
17286 +++ b/arch/x86/kernel/sys_i386_32.c
17287 @@ -24,17 +24,224 @@
17288
17289 #include <asm/syscalls.h>
17290
17291 -/*
17292 - * Do a system call from kernel instead of calling sys_execve so we
17293 - * end up with proper pt_regs.
17294 - */
17295 -int kernel_execve(const char *filename,
17296 - const char *const argv[],
17297 - const char *const envp[])
17298 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17299 {
17300 - long __res;
17301 - asm volatile ("int $0x80"
17302 - : "=a" (__res)
17303 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
17304 - return __res;
17305 + unsigned long pax_task_size = TASK_SIZE;
17306 +
17307 +#ifdef CONFIG_PAX_SEGMEXEC
17308 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17309 + pax_task_size = SEGMEXEC_TASK_SIZE;
17310 +#endif
17311 +
17312 + if (len > pax_task_size || addr > pax_task_size - len)
17313 + return -EINVAL;
17314 +
17315 + return 0;
17316 +}
17317 +
17318 +unsigned long
17319 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
17320 + unsigned long len, unsigned long pgoff, unsigned long flags)
17321 +{
17322 + struct mm_struct *mm = current->mm;
17323 + struct vm_area_struct *vma;
17324 + unsigned long start_addr, pax_task_size = TASK_SIZE;
17325 +
17326 +#ifdef CONFIG_PAX_SEGMEXEC
17327 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17328 + pax_task_size = SEGMEXEC_TASK_SIZE;
17329 +#endif
17330 +
17331 + pax_task_size -= PAGE_SIZE;
17332 +
17333 + if (len > pax_task_size)
17334 + return -ENOMEM;
17335 +
17336 + if (flags & MAP_FIXED)
17337 + return addr;
17338 +
17339 +#ifdef CONFIG_PAX_RANDMMAP
17340 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17341 +#endif
17342 +
17343 + if (addr) {
17344 + addr = PAGE_ALIGN(addr);
17345 + if (pax_task_size - len >= addr) {
17346 + vma = find_vma(mm, addr);
17347 + if (check_heap_stack_gap(vma, addr, len))
17348 + return addr;
17349 + }
17350 + }
17351 + if (len > mm->cached_hole_size) {
17352 + start_addr = addr = mm->free_area_cache;
17353 + } else {
17354 + start_addr = addr = mm->mmap_base;
17355 + mm->cached_hole_size = 0;
17356 + }
17357 +
17358 +#ifdef CONFIG_PAX_PAGEEXEC
17359 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17360 + start_addr = 0x00110000UL;
17361 +
17362 +#ifdef CONFIG_PAX_RANDMMAP
17363 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17364 + start_addr += mm->delta_mmap & 0x03FFF000UL;
17365 +#endif
17366 +
17367 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17368 + start_addr = addr = mm->mmap_base;
17369 + else
17370 + addr = start_addr;
17371 + }
17372 +#endif
17373 +
17374 +full_search:
17375 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17376 + /* At this point: (!vma || addr < vma->vm_end). */
17377 + if (pax_task_size - len < addr) {
17378 + /*
17379 + * Start a new search - just in case we missed
17380 + * some holes.
17381 + */
17382 + if (start_addr != mm->mmap_base) {
17383 + start_addr = addr = mm->mmap_base;
17384 + mm->cached_hole_size = 0;
17385 + goto full_search;
17386 + }
17387 + return -ENOMEM;
17388 + }
17389 + if (check_heap_stack_gap(vma, addr, len))
17390 + break;
17391 + if (addr + mm->cached_hole_size < vma->vm_start)
17392 + mm->cached_hole_size = vma->vm_start - addr;
17393 + addr = vma->vm_end;
17394 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
17395 + start_addr = addr = mm->mmap_base;
17396 + mm->cached_hole_size = 0;
17397 + goto full_search;
17398 + }
17399 + }
17400 +
17401 + /*
17402 + * Remember the place where we stopped the search:
17403 + */
17404 + mm->free_area_cache = addr + len;
17405 + return addr;
17406 +}
17407 +
17408 +unsigned long
17409 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17410 + const unsigned long len, const unsigned long pgoff,
17411 + const unsigned long flags)
17412 +{
17413 + struct vm_area_struct *vma;
17414 + struct mm_struct *mm = current->mm;
17415 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17416 +
17417 +#ifdef CONFIG_PAX_SEGMEXEC
17418 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17419 + pax_task_size = SEGMEXEC_TASK_SIZE;
17420 +#endif
17421 +
17422 + pax_task_size -= PAGE_SIZE;
17423 +
17424 + /* requested length too big for entire address space */
17425 + if (len > pax_task_size)
17426 + return -ENOMEM;
17427 +
17428 + if (flags & MAP_FIXED)
17429 + return addr;
17430 +
17431 +#ifdef CONFIG_PAX_PAGEEXEC
17432 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17433 + goto bottomup;
17434 +#endif
17435 +
17436 +#ifdef CONFIG_PAX_RANDMMAP
17437 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17438 +#endif
17439 +
17440 + /* requesting a specific address */
17441 + if (addr) {
17442 + addr = PAGE_ALIGN(addr);
17443 + if (pax_task_size - len >= addr) {
17444 + vma = find_vma(mm, addr);
17445 + if (check_heap_stack_gap(vma, addr, len))
17446 + return addr;
17447 + }
17448 + }
17449 +
17450 + /* check if free_area_cache is useful for us */
17451 + if (len <= mm->cached_hole_size) {
17452 + mm->cached_hole_size = 0;
17453 + mm->free_area_cache = mm->mmap_base;
17454 + }
17455 +
17456 + /* either no address requested or can't fit in requested address hole */
17457 + addr = mm->free_area_cache;
17458 +
17459 + /* make sure it can fit in the remaining address space */
17460 + if (addr > len) {
17461 + vma = find_vma(mm, addr-len);
17462 + if (check_heap_stack_gap(vma, addr - len, len))
17463 + /* remember the address as a hint for next time */
17464 + return (mm->free_area_cache = addr-len);
17465 + }
17466 +
17467 + if (mm->mmap_base < len)
17468 + goto bottomup;
17469 +
17470 + addr = mm->mmap_base-len;
17471 +
17472 + do {
17473 + /*
17474 + * Lookup failure means no vma is above this address,
17475 + * else if new region fits below vma->vm_start,
17476 + * return with success:
17477 + */
17478 + vma = find_vma(mm, addr);
17479 + if (check_heap_stack_gap(vma, addr, len))
17480 + /* remember the address as a hint for next time */
17481 + return (mm->free_area_cache = addr);
17482 +
17483 + /* remember the largest hole we saw so far */
17484 + if (addr + mm->cached_hole_size < vma->vm_start)
17485 + mm->cached_hole_size = vma->vm_start - addr;
17486 +
17487 + /* try just below the current vma->vm_start */
17488 + addr = skip_heap_stack_gap(vma, len);
17489 + } while (!IS_ERR_VALUE(addr));
17490 +
17491 +bottomup:
17492 + /*
17493 + * A failed mmap() very likely causes application failure,
17494 + * so fall back to the bottom-up function here. This scenario
17495 + * can happen with large stack limits and large mmap()
17496 + * allocations.
17497 + */
17498 +
17499 +#ifdef CONFIG_PAX_SEGMEXEC
17500 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17501 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17502 + else
17503 +#endif
17504 +
17505 + mm->mmap_base = TASK_UNMAPPED_BASE;
17506 +
17507 +#ifdef CONFIG_PAX_RANDMMAP
17508 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17509 + mm->mmap_base += mm->delta_mmap;
17510 +#endif
17511 +
17512 + mm->free_area_cache = mm->mmap_base;
17513 + mm->cached_hole_size = ~0UL;
17514 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17515 + /*
17516 + * Restore the topdown base:
17517 + */
17518 + mm->mmap_base = base;
17519 + mm->free_area_cache = base;
17520 + mm->cached_hole_size = ~0UL;
17521 +
17522 + return addr;
17523 }
17524 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17525 index 0514890..3dbebce 100644
17526 --- a/arch/x86/kernel/sys_x86_64.c
17527 +++ b/arch/x86/kernel/sys_x86_64.c
17528 @@ -95,8 +95,8 @@ out:
17529 return error;
17530 }
17531
17532 -static void find_start_end(unsigned long flags, unsigned long *begin,
17533 - unsigned long *end)
17534 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17535 + unsigned long *begin, unsigned long *end)
17536 {
17537 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17538 unsigned long new_begin;
17539 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17540 *begin = new_begin;
17541 }
17542 } else {
17543 - *begin = TASK_UNMAPPED_BASE;
17544 + *begin = mm->mmap_base;
17545 *end = TASK_SIZE;
17546 }
17547 }
17548 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17549 if (flags & MAP_FIXED)
17550 return addr;
17551
17552 - find_start_end(flags, &begin, &end);
17553 + find_start_end(mm, flags, &begin, &end);
17554
17555 if (len > end)
17556 return -ENOMEM;
17557
17558 +#ifdef CONFIG_PAX_RANDMMAP
17559 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17560 +#endif
17561 +
17562 if (addr) {
17563 addr = PAGE_ALIGN(addr);
17564 vma = find_vma(mm, addr);
17565 - if (end - len >= addr &&
17566 - (!vma || addr + len <= vma->vm_start))
17567 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17568 return addr;
17569 }
17570 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17571 @@ -172,7 +175,7 @@ full_search:
17572 }
17573 return -ENOMEM;
17574 }
17575 - if (!vma || addr + len <= vma->vm_start) {
17576 + if (check_heap_stack_gap(vma, addr, len)) {
17577 /*
17578 * Remember the place where we stopped the search:
17579 */
17580 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17581 {
17582 struct vm_area_struct *vma;
17583 struct mm_struct *mm = current->mm;
17584 - unsigned long addr = addr0;
17585 + unsigned long base = mm->mmap_base, addr = addr0;
17586
17587 /* requested length too big for entire address space */
17588 if (len > TASK_SIZE)
17589 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17590 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17591 goto bottomup;
17592
17593 +#ifdef CONFIG_PAX_RANDMMAP
17594 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17595 +#endif
17596 +
17597 /* requesting a specific address */
17598 if (addr) {
17599 addr = PAGE_ALIGN(addr);
17600 - vma = find_vma(mm, addr);
17601 - if (TASK_SIZE - len >= addr &&
17602 - (!vma || addr + len <= vma->vm_start))
17603 - return addr;
17604 + if (TASK_SIZE - len >= addr) {
17605 + vma = find_vma(mm, addr);
17606 + if (check_heap_stack_gap(vma, addr, len))
17607 + return addr;
17608 + }
17609 }
17610
17611 /* check if free_area_cache is useful for us */
17612 @@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17613 ALIGN_TOPDOWN);
17614
17615 vma = find_vma(mm, tmp_addr);
17616 - if (!vma || tmp_addr + len <= vma->vm_start)
17617 + if (check_heap_stack_gap(vma, tmp_addr, len))
17618 /* remember the address as a hint for next time */
17619 return mm->free_area_cache = tmp_addr;
17620 }
17621 @@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17622 * return with success:
17623 */
17624 vma = find_vma(mm, addr);
17625 - if (!vma || addr+len <= vma->vm_start)
17626 + if (check_heap_stack_gap(vma, addr, len))
17627 /* remember the address as a hint for next time */
17628 return mm->free_area_cache = addr;
17629
17630 @@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17631 mm->cached_hole_size = vma->vm_start - addr;
17632
17633 /* try just below the current vma->vm_start */
17634 - addr = vma->vm_start-len;
17635 - } while (len < vma->vm_start);
17636 + addr = skip_heap_stack_gap(vma, len);
17637 + } while (!IS_ERR_VALUE(addr));
17638
17639 bottomup:
17640 /*
17641 @@ -270,13 +278,21 @@ bottomup:
17642 * can happen with large stack limits and large mmap()
17643 * allocations.
17644 */
17645 + mm->mmap_base = TASK_UNMAPPED_BASE;
17646 +
17647 +#ifdef CONFIG_PAX_RANDMMAP
17648 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17649 + mm->mmap_base += mm->delta_mmap;
17650 +#endif
17651 +
17652 + mm->free_area_cache = mm->mmap_base;
17653 mm->cached_hole_size = ~0UL;
17654 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17655 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17656 /*
17657 * Restore the topdown base:
17658 */
17659 - mm->free_area_cache = mm->mmap_base;
17660 + mm->mmap_base = base;
17661 + mm->free_area_cache = base;
17662 mm->cached_hole_size = ~0UL;
17663
17664 return addr;
17665 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17666 index 9a0e312..e6f66f2 100644
17667 --- a/arch/x86/kernel/syscall_table_32.S
17668 +++ b/arch/x86/kernel/syscall_table_32.S
17669 @@ -1,3 +1,4 @@
17670 +.section .rodata,"a",@progbits
17671 ENTRY(sys_call_table)
17672 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17673 .long sys_exit
17674 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17675 index e2410e2..4fe3fbc 100644
17676 --- a/arch/x86/kernel/tboot.c
17677 +++ b/arch/x86/kernel/tboot.c
17678 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
17679
17680 void tboot_shutdown(u32 shutdown_type)
17681 {
17682 - void (*shutdown)(void);
17683 + void (* __noreturn shutdown)(void);
17684
17685 if (!tboot_enabled())
17686 return;
17687 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
17688
17689 switch_to_tboot_pt();
17690
17691 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17692 + shutdown = (void *)tboot->shutdown_entry;
17693 shutdown();
17694
17695 /* should not reach here */
17696 @@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17697 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17698 }
17699
17700 -static atomic_t ap_wfs_count;
17701 +static atomic_unchecked_t ap_wfs_count;
17702
17703 static int tboot_wait_for_aps(int num_aps)
17704 {
17705 @@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17706 {
17707 switch (action) {
17708 case CPU_DYING:
17709 - atomic_inc(&ap_wfs_count);
17710 + atomic_inc_unchecked(&ap_wfs_count);
17711 if (num_online_cpus() == 1)
17712 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17713 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17714 return NOTIFY_BAD;
17715 break;
17716 }
17717 @@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
17718
17719 tboot_create_trampoline();
17720
17721 - atomic_set(&ap_wfs_count, 0);
17722 + atomic_set_unchecked(&ap_wfs_count, 0);
17723 register_hotcpu_notifier(&tboot_cpu_notifier);
17724 return 0;
17725 }
17726 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17727 index dd5fbf4..b7f2232 100644
17728 --- a/arch/x86/kernel/time.c
17729 +++ b/arch/x86/kernel/time.c
17730 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17731 {
17732 unsigned long pc = instruction_pointer(regs);
17733
17734 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17735 + if (!user_mode(regs) && in_lock_functions(pc)) {
17736 #ifdef CONFIG_FRAME_POINTER
17737 - return *(unsigned long *)(regs->bp + sizeof(long));
17738 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17739 #else
17740 unsigned long *sp =
17741 (unsigned long *)kernel_stack_pointer(regs);
17742 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17743 * or above a saved flags. Eflags has bits 22-31 zero,
17744 * kernel addresses don't.
17745 */
17746 +
17747 +#ifdef CONFIG_PAX_KERNEXEC
17748 + return ktla_ktva(sp[0]);
17749 +#else
17750 if (sp[0] >> 22)
17751 return sp[0];
17752 if (sp[1] >> 22)
17753 return sp[1];
17754 #endif
17755 +
17756 +#endif
17757 }
17758 return pc;
17759 }
17760 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17761 index 6bb7b85..dd853e1 100644
17762 --- a/arch/x86/kernel/tls.c
17763 +++ b/arch/x86/kernel/tls.c
17764 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17765 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17766 return -EINVAL;
17767
17768 +#ifdef CONFIG_PAX_SEGMEXEC
17769 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17770 + return -EINVAL;
17771 +#endif
17772 +
17773 set_tls_desc(p, idx, &info, 1);
17774
17775 return 0;
17776 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17777 index 451c0a7..e57f551 100644
17778 --- a/arch/x86/kernel/trampoline_32.S
17779 +++ b/arch/x86/kernel/trampoline_32.S
17780 @@ -32,6 +32,12 @@
17781 #include <asm/segment.h>
17782 #include <asm/page_types.h>
17783
17784 +#ifdef CONFIG_PAX_KERNEXEC
17785 +#define ta(X) (X)
17786 +#else
17787 +#define ta(X) ((X) - __PAGE_OFFSET)
17788 +#endif
17789 +
17790 #ifdef CONFIG_SMP
17791
17792 .section ".x86_trampoline","a"
17793 @@ -62,7 +68,7 @@ r_base = .
17794 inc %ax # protected mode (PE) bit
17795 lmsw %ax # into protected mode
17796 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17797 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17798 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17799
17800 # These need to be in the same 64K segment as the above;
17801 # hence we don't use the boot_gdt_descr defined in head.S
17802 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17803 index 09ff517..df19fbff 100644
17804 --- a/arch/x86/kernel/trampoline_64.S
17805 +++ b/arch/x86/kernel/trampoline_64.S
17806 @@ -90,7 +90,7 @@ startup_32:
17807 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17808 movl %eax, %ds
17809
17810 - movl $X86_CR4_PAE, %eax
17811 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17812 movl %eax, %cr4 # Enable PAE mode
17813
17814 # Setup trampoline 4 level pagetables
17815 @@ -138,7 +138,7 @@ tidt:
17816 # so the kernel can live anywhere
17817 .balign 4
17818 tgdt:
17819 - .short tgdt_end - tgdt # gdt limit
17820 + .short tgdt_end - tgdt - 1 # gdt limit
17821 .long tgdt - r_base
17822 .short 0
17823 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17824 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17825 index a8e3eb8..c9dbd7d 100644
17826 --- a/arch/x86/kernel/traps.c
17827 +++ b/arch/x86/kernel/traps.c
17828 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17829
17830 /* Do we ignore FPU interrupts ? */
17831 char ignore_fpu_irq;
17832 -
17833 -/*
17834 - * The IDT has to be page-aligned to simplify the Pentium
17835 - * F0 0F bug workaround.
17836 - */
17837 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17838 #endif
17839
17840 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17841 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17842 }
17843
17844 static void __kprobes
17845 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17846 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17847 long error_code, siginfo_t *info)
17848 {
17849 struct task_struct *tsk = current;
17850
17851 #ifdef CONFIG_X86_32
17852 - if (regs->flags & X86_VM_MASK) {
17853 + if (v8086_mode(regs)) {
17854 /*
17855 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17856 * On nmi (interrupt 2), do_trap should not be called.
17857 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17858 }
17859 #endif
17860
17861 - if (!user_mode(regs))
17862 + if (!user_mode_novm(regs))
17863 goto kernel_trap;
17864
17865 #ifdef CONFIG_X86_32
17866 @@ -148,7 +142,7 @@ trap_signal:
17867 printk_ratelimit()) {
17868 printk(KERN_INFO
17869 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17870 - tsk->comm, tsk->pid, str,
17871 + tsk->comm, task_pid_nr(tsk), str,
17872 regs->ip, regs->sp, error_code);
17873 print_vma_addr(" in ", regs->ip);
17874 printk("\n");
17875 @@ -165,8 +159,20 @@ kernel_trap:
17876 if (!fixup_exception(regs)) {
17877 tsk->thread.error_code = error_code;
17878 tsk->thread.trap_no = trapnr;
17879 +
17880 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17881 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17882 + str = "PAX: suspicious stack segment fault";
17883 +#endif
17884 +
17885 die(str, regs, error_code);
17886 }
17887 +
17888 +#ifdef CONFIG_PAX_REFCOUNT
17889 + if (trapnr == 4)
17890 + pax_report_refcount_overflow(regs);
17891 +#endif
17892 +
17893 return;
17894
17895 #ifdef CONFIG_X86_32
17896 @@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17897 conditional_sti(regs);
17898
17899 #ifdef CONFIG_X86_32
17900 - if (regs->flags & X86_VM_MASK)
17901 + if (v8086_mode(regs))
17902 goto gp_in_vm86;
17903 #endif
17904
17905 tsk = current;
17906 - if (!user_mode(regs))
17907 + if (!user_mode_novm(regs))
17908 goto gp_in_kernel;
17909
17910 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17911 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17912 + struct mm_struct *mm = tsk->mm;
17913 + unsigned long limit;
17914 +
17915 + down_write(&mm->mmap_sem);
17916 + limit = mm->context.user_cs_limit;
17917 + if (limit < TASK_SIZE) {
17918 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17919 + up_write(&mm->mmap_sem);
17920 + return;
17921 + }
17922 + up_write(&mm->mmap_sem);
17923 + }
17924 +#endif
17925 +
17926 tsk->thread.error_code = error_code;
17927 tsk->thread.trap_no = 13;
17928
17929 @@ -295,6 +317,13 @@ gp_in_kernel:
17930 if (notify_die(DIE_GPF, "general protection fault", regs,
17931 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17932 return;
17933 +
17934 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17935 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17936 + die("PAX: suspicious general protection fault", regs, error_code);
17937 + else
17938 +#endif
17939 +
17940 die("general protection fault", regs, error_code);
17941 }
17942
17943 @@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17944 /* It's safe to allow irq's after DR6 has been saved */
17945 preempt_conditional_sti(regs);
17946
17947 - if (regs->flags & X86_VM_MASK) {
17948 + if (v8086_mode(regs)) {
17949 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17950 error_code, 1);
17951 preempt_conditional_cli(regs);
17952 @@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17953 * We already checked v86 mode above, so we can check for kernel mode
17954 * by just checking the CPL of CS.
17955 */
17956 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
17957 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17958 tsk->thread.debugreg6 &= ~DR_STEP;
17959 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17960 regs->flags &= ~X86_EFLAGS_TF;
17961 @@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17962 return;
17963 conditional_sti(regs);
17964
17965 - if (!user_mode_vm(regs))
17966 + if (!user_mode(regs))
17967 {
17968 if (!fixup_exception(regs)) {
17969 task->thread.error_code = error_code;
17970 @@ -568,7 +597,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17971 void __math_state_restore(void)
17972 {
17973 struct thread_info *thread = current_thread_info();
17974 - struct task_struct *tsk = thread->task;
17975 + struct task_struct *tsk = current;
17976
17977 /*
17978 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17979 @@ -595,8 +624,7 @@ void __math_state_restore(void)
17980 */
17981 asmlinkage void math_state_restore(void)
17982 {
17983 - struct thread_info *thread = current_thread_info();
17984 - struct task_struct *tsk = thread->task;
17985 + struct task_struct *tsk = current;
17986
17987 if (!tsk_used_math(tsk)) {
17988 local_irq_enable();
17989 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17990 index b9242ba..50c5edd 100644
17991 --- a/arch/x86/kernel/verify_cpu.S
17992 +++ b/arch/x86/kernel/verify_cpu.S
17993 @@ -20,6 +20,7 @@
17994 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17995 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17996 * arch/x86/kernel/head_32.S: processor startup
17997 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17998 *
17999 * verify_cpu, returns the status of longmode and SSE in register %eax.
18000 * 0: Success 1: Failure
18001 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
18002 index 863f875..4307295 100644
18003 --- a/arch/x86/kernel/vm86_32.c
18004 +++ b/arch/x86/kernel/vm86_32.c
18005 @@ -41,6 +41,7 @@
18006 #include <linux/ptrace.h>
18007 #include <linux/audit.h>
18008 #include <linux/stddef.h>
18009 +#include <linux/grsecurity.h>
18010
18011 #include <asm/uaccess.h>
18012 #include <asm/io.h>
18013 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
18014 do_exit(SIGSEGV);
18015 }
18016
18017 - tss = &per_cpu(init_tss, get_cpu());
18018 + tss = init_tss + get_cpu();
18019 current->thread.sp0 = current->thread.saved_sp0;
18020 current->thread.sysenter_cs = __KERNEL_CS;
18021 load_sp0(tss, &current->thread);
18022 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
18023 struct task_struct *tsk;
18024 int tmp, ret = -EPERM;
18025
18026 +#ifdef CONFIG_GRKERNSEC_VM86
18027 + if (!capable(CAP_SYS_RAWIO)) {
18028 + gr_handle_vm86();
18029 + goto out;
18030 + }
18031 +#endif
18032 +
18033 tsk = current;
18034 if (tsk->thread.saved_sp0)
18035 goto out;
18036 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
18037 int tmp, ret;
18038 struct vm86plus_struct __user *v86;
18039
18040 +#ifdef CONFIG_GRKERNSEC_VM86
18041 + if (!capable(CAP_SYS_RAWIO)) {
18042 + gr_handle_vm86();
18043 + ret = -EPERM;
18044 + goto out;
18045 + }
18046 +#endif
18047 +
18048 tsk = current;
18049 switch (cmd) {
18050 case VM86_REQUEST_IRQ:
18051 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
18052 tsk->thread.saved_fs = info->regs32->fs;
18053 tsk->thread.saved_gs = get_user_gs(info->regs32);
18054
18055 - tss = &per_cpu(init_tss, get_cpu());
18056 + tss = init_tss + get_cpu();
18057 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
18058 if (cpu_has_sep)
18059 tsk->thread.sysenter_cs = 0;
18060 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
18061 goto cannot_handle;
18062 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
18063 goto cannot_handle;
18064 - intr_ptr = (unsigned long __user *) (i << 2);
18065 + intr_ptr = (__force unsigned long __user *) (i << 2);
18066 if (get_user(segoffs, intr_ptr))
18067 goto cannot_handle;
18068 if ((segoffs >> 16) == BIOSSEG)
18069 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
18070 index 0f703f1..9e15f64 100644
18071 --- a/arch/x86/kernel/vmlinux.lds.S
18072 +++ b/arch/x86/kernel/vmlinux.lds.S
18073 @@ -26,6 +26,13 @@
18074 #include <asm/page_types.h>
18075 #include <asm/cache.h>
18076 #include <asm/boot.h>
18077 +#include <asm/segment.h>
18078 +
18079 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18080 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18081 +#else
18082 +#define __KERNEL_TEXT_OFFSET 0
18083 +#endif
18084
18085 #undef i386 /* in case the preprocessor is a 32bit one */
18086
18087 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
18088
18089 PHDRS {
18090 text PT_LOAD FLAGS(5); /* R_E */
18091 +#ifdef CONFIG_X86_32
18092 + module PT_LOAD FLAGS(5); /* R_E */
18093 +#endif
18094 +#ifdef CONFIG_XEN
18095 + rodata PT_LOAD FLAGS(5); /* R_E */
18096 +#else
18097 + rodata PT_LOAD FLAGS(4); /* R__ */
18098 +#endif
18099 data PT_LOAD FLAGS(6); /* RW_ */
18100 -#ifdef CONFIG_X86_64
18101 + init.begin PT_LOAD FLAGS(6); /* RW_ */
18102 #ifdef CONFIG_SMP
18103 percpu PT_LOAD FLAGS(6); /* RW_ */
18104 #endif
18105 + text.init PT_LOAD FLAGS(5); /* R_E */
18106 + text.exit PT_LOAD FLAGS(5); /* R_E */
18107 init PT_LOAD FLAGS(7); /* RWE */
18108 -#endif
18109 note PT_NOTE FLAGS(0); /* ___ */
18110 }
18111
18112 SECTIONS
18113 {
18114 #ifdef CONFIG_X86_32
18115 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18116 - phys_startup_32 = startup_32 - LOAD_OFFSET;
18117 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18118 #else
18119 - . = __START_KERNEL;
18120 - phys_startup_64 = startup_64 - LOAD_OFFSET;
18121 + . = __START_KERNEL;
18122 #endif
18123
18124 /* Text and read-only data */
18125 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
18126 - _text = .;
18127 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18128 /* bootstrapping code */
18129 +#ifdef CONFIG_X86_32
18130 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18131 +#else
18132 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18133 +#endif
18134 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18135 + _text = .;
18136 HEAD_TEXT
18137 #ifdef CONFIG_X86_32
18138 . = ALIGN(PAGE_SIZE);
18139 @@ -108,13 +128,47 @@ SECTIONS
18140 IRQENTRY_TEXT
18141 *(.fixup)
18142 *(.gnu.warning)
18143 - /* End of text section */
18144 - _etext = .;
18145 } :text = 0x9090
18146
18147 - NOTES :text :note
18148 + . += __KERNEL_TEXT_OFFSET;
18149
18150 - EXCEPTION_TABLE(16) :text = 0x9090
18151 +#ifdef CONFIG_X86_32
18152 + . = ALIGN(PAGE_SIZE);
18153 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18154 +
18155 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18156 + MODULES_EXEC_VADDR = .;
18157 + BYTE(0)
18158 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18159 + . = ALIGN(HPAGE_SIZE);
18160 + MODULES_EXEC_END = . - 1;
18161 +#endif
18162 +
18163 + } :module
18164 +#endif
18165 +
18166 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18167 + /* End of text section */
18168 + _etext = . - __KERNEL_TEXT_OFFSET;
18169 + }
18170 +
18171 +#ifdef CONFIG_X86_32
18172 + . = ALIGN(PAGE_SIZE);
18173 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18174 + *(.idt)
18175 + . = ALIGN(PAGE_SIZE);
18176 + *(.empty_zero_page)
18177 + *(.initial_pg_fixmap)
18178 + *(.initial_pg_pmd)
18179 + *(.initial_page_table)
18180 + *(.swapper_pg_dir)
18181 + } :rodata
18182 +#endif
18183 +
18184 + . = ALIGN(PAGE_SIZE);
18185 + NOTES :rodata :note
18186 +
18187 + EXCEPTION_TABLE(16) :rodata
18188
18189 #if defined(CONFIG_DEBUG_RODATA)
18190 /* .text should occupy whole number of pages */
18191 @@ -126,16 +180,20 @@ SECTIONS
18192
18193 /* Data */
18194 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18195 +
18196 +#ifdef CONFIG_PAX_KERNEXEC
18197 + . = ALIGN(HPAGE_SIZE);
18198 +#else
18199 + . = ALIGN(PAGE_SIZE);
18200 +#endif
18201 +
18202 /* Start of data section */
18203 _sdata = .;
18204
18205 /* init_task */
18206 INIT_TASK_DATA(THREAD_SIZE)
18207
18208 -#ifdef CONFIG_X86_32
18209 - /* 32 bit has nosave before _edata */
18210 NOSAVE_DATA
18211 -#endif
18212
18213 PAGE_ALIGNED_DATA(PAGE_SIZE)
18214
18215 @@ -176,12 +234,19 @@ SECTIONS
18216 #endif /* CONFIG_X86_64 */
18217
18218 /* Init code and data - will be freed after init */
18219 - . = ALIGN(PAGE_SIZE);
18220 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18221 + BYTE(0)
18222 +
18223 +#ifdef CONFIG_PAX_KERNEXEC
18224 + . = ALIGN(HPAGE_SIZE);
18225 +#else
18226 + . = ALIGN(PAGE_SIZE);
18227 +#endif
18228 +
18229 __init_begin = .; /* paired with __init_end */
18230 - }
18231 + } :init.begin
18232
18233 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18234 +#ifdef CONFIG_SMP
18235 /*
18236 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18237 * output PHDR, so the next output section - .init.text - should
18238 @@ -190,12 +255,27 @@ SECTIONS
18239 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
18240 #endif
18241
18242 - INIT_TEXT_SECTION(PAGE_SIZE)
18243 -#ifdef CONFIG_X86_64
18244 - :init
18245 -#endif
18246 + . = ALIGN(PAGE_SIZE);
18247 + init_begin = .;
18248 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18249 + VMLINUX_SYMBOL(_sinittext) = .;
18250 + INIT_TEXT
18251 + VMLINUX_SYMBOL(_einittext) = .;
18252 + . = ALIGN(PAGE_SIZE);
18253 + } :text.init
18254
18255 - INIT_DATA_SECTION(16)
18256 + /*
18257 + * .exit.text is discard at runtime, not link time, to deal with
18258 + * references from .altinstructions and .eh_frame
18259 + */
18260 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18261 + EXIT_TEXT
18262 + . = ALIGN(16);
18263 + } :text.exit
18264 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18265 +
18266 + . = ALIGN(PAGE_SIZE);
18267 + INIT_DATA_SECTION(16) :init
18268
18269 /*
18270 * Code and data for a variety of lowlevel trampolines, to be
18271 @@ -269,19 +349,12 @@ SECTIONS
18272 }
18273
18274 . = ALIGN(8);
18275 - /*
18276 - * .exit.text is discard at runtime, not link time, to deal with
18277 - * references from .altinstructions and .eh_frame
18278 - */
18279 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18280 - EXIT_TEXT
18281 - }
18282
18283 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18284 EXIT_DATA
18285 }
18286
18287 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18288 +#ifndef CONFIG_SMP
18289 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
18290 #endif
18291
18292 @@ -300,16 +373,10 @@ SECTIONS
18293 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
18294 __smp_locks = .;
18295 *(.smp_locks)
18296 - . = ALIGN(PAGE_SIZE);
18297 __smp_locks_end = .;
18298 + . = ALIGN(PAGE_SIZE);
18299 }
18300
18301 -#ifdef CONFIG_X86_64
18302 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18303 - NOSAVE_DATA
18304 - }
18305 -#endif
18306 -
18307 /* BSS */
18308 . = ALIGN(PAGE_SIZE);
18309 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18310 @@ -325,6 +392,7 @@ SECTIONS
18311 __brk_base = .;
18312 . += 64 * 1024; /* 64k alignment slop space */
18313 *(.brk_reservation) /* areas brk users have reserved */
18314 + . = ALIGN(HPAGE_SIZE);
18315 __brk_limit = .;
18316 }
18317
18318 @@ -351,13 +419,12 @@ SECTIONS
18319 * for the boot processor.
18320 */
18321 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18322 -INIT_PER_CPU(gdt_page);
18323 INIT_PER_CPU(irq_stack_union);
18324
18325 /*
18326 * Build-time check on the image size:
18327 */
18328 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18329 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18330 "kernel image bigger than KERNEL_IMAGE_SIZE");
18331
18332 #ifdef CONFIG_SMP
18333 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18334 index e4d4a22..47ee71f 100644
18335 --- a/arch/x86/kernel/vsyscall_64.c
18336 +++ b/arch/x86/kernel/vsyscall_64.c
18337 @@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18338 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18339 };
18340
18341 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18342 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18343
18344 static int __init vsyscall_setup(char *str)
18345 {
18346 if (str) {
18347 if (!strcmp("emulate", str))
18348 vsyscall_mode = EMULATE;
18349 - else if (!strcmp("native", str))
18350 - vsyscall_mode = NATIVE;
18351 else if (!strcmp("none", str))
18352 vsyscall_mode = NONE;
18353 else
18354 @@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18355
18356 tsk = current;
18357 if (seccomp_mode(&tsk->seccomp))
18358 - do_exit(SIGKILL);
18359 + do_group_exit(SIGKILL);
18360
18361 switch (vsyscall_nr) {
18362 case 0:
18363 @@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18364 return true;
18365
18366 sigsegv:
18367 - force_sig(SIGSEGV, current);
18368 - return true;
18369 + do_group_exit(SIGKILL);
18370 }
18371
18372 /*
18373 @@ -274,10 +271,7 @@ void __init map_vsyscall(void)
18374 extern char __vvar_page;
18375 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18376
18377 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18378 - vsyscall_mode == NATIVE
18379 - ? PAGE_KERNEL_VSYSCALL
18380 - : PAGE_KERNEL_VVAR);
18381 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18382 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18383 (unsigned long)VSYSCALL_START);
18384
18385 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18386 index 9796c2f..f686fbf 100644
18387 --- a/arch/x86/kernel/x8664_ksyms_64.c
18388 +++ b/arch/x86/kernel/x8664_ksyms_64.c
18389 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18390 EXPORT_SYMBOL(copy_user_generic_string);
18391 EXPORT_SYMBOL(copy_user_generic_unrolled);
18392 EXPORT_SYMBOL(__copy_user_nocache);
18393 -EXPORT_SYMBOL(_copy_from_user);
18394 -EXPORT_SYMBOL(_copy_to_user);
18395
18396 EXPORT_SYMBOL(copy_page);
18397 EXPORT_SYMBOL(clear_page);
18398 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18399 index a391134..d0b63b6e 100644
18400 --- a/arch/x86/kernel/xsave.c
18401 +++ b/arch/x86/kernel/xsave.c
18402 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18403 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18404 return -EINVAL;
18405
18406 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18407 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18408 fx_sw_user->extended_size -
18409 FP_XSTATE_MAGIC2_SIZE));
18410 if (err)
18411 @@ -267,7 +267,7 @@ fx_only:
18412 * the other extended state.
18413 */
18414 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18415 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18416 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18417 }
18418
18419 /*
18420 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
18421 if (use_xsave())
18422 err = restore_user_xstate(buf);
18423 else
18424 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18425 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18426 buf);
18427 if (unlikely(err)) {
18428 /*
18429 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18430 index f1e3be1..588efc8 100644
18431 --- a/arch/x86/kvm/emulate.c
18432 +++ b/arch/x86/kvm/emulate.c
18433 @@ -249,6 +249,7 @@ struct gprefix {
18434
18435 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
18436 do { \
18437 + unsigned long _tmp; \
18438 __asm__ __volatile__ ( \
18439 _PRE_EFLAGS("0", "4", "2") \
18440 _op _suffix " %"_x"3,%1; " \
18441 @@ -263,8 +264,6 @@ struct gprefix {
18442 /* Raw emulation: instruction has two explicit operands. */
18443 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
18444 do { \
18445 - unsigned long _tmp; \
18446 - \
18447 switch ((ctxt)->dst.bytes) { \
18448 case 2: \
18449 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
18450 @@ -280,7 +279,6 @@ struct gprefix {
18451
18452 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18453 do { \
18454 - unsigned long _tmp; \
18455 switch ((ctxt)->dst.bytes) { \
18456 case 1: \
18457 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
18458 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18459 index 54abb40..a192606 100644
18460 --- a/arch/x86/kvm/lapic.c
18461 +++ b/arch/x86/kvm/lapic.c
18462 @@ -53,7 +53,7 @@
18463 #define APIC_BUS_CYCLE_NS 1
18464
18465 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18466 -#define apic_debug(fmt, arg...)
18467 +#define apic_debug(fmt, arg...) do {} while (0)
18468
18469 #define APIC_LVT_NUM 6
18470 /* 14 is the version for Xeon and Pentium 8.4.8*/
18471 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18472 index f1b36cf..af8a124 100644
18473 --- a/arch/x86/kvm/mmu.c
18474 +++ b/arch/x86/kvm/mmu.c
18475 @@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18476
18477 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18478
18479 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18480 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18481
18482 /*
18483 * Assume that the pte write on a page table of the same type
18484 @@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18485 }
18486
18487 spin_lock(&vcpu->kvm->mmu_lock);
18488 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18489 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18490 gentry = 0;
18491 kvm_mmu_free_some_pages(vcpu);
18492 ++vcpu->kvm->stat.mmu_pte_write;
18493 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18494 index 9299410..ade2f9b 100644
18495 --- a/arch/x86/kvm/paging_tmpl.h
18496 +++ b/arch/x86/kvm/paging_tmpl.h
18497 @@ -197,7 +197,7 @@ retry_walk:
18498 if (unlikely(kvm_is_error_hva(host_addr)))
18499 goto error;
18500
18501 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18502 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18503 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18504 goto error;
18505
18506 @@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18507 if (need_flush)
18508 kvm_flush_remote_tlbs(vcpu->kvm);
18509
18510 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18511 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18512
18513 spin_unlock(&vcpu->kvm->mmu_lock);
18514
18515 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18516 index e32243e..a6e6172 100644
18517 --- a/arch/x86/kvm/svm.c
18518 +++ b/arch/x86/kvm/svm.c
18519 @@ -3400,7 +3400,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18520 int cpu = raw_smp_processor_id();
18521
18522 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18523 +
18524 + pax_open_kernel();
18525 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18526 + pax_close_kernel();
18527 +
18528 load_TR_desc();
18529 }
18530
18531 @@ -3778,6 +3782,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18532 #endif
18533 #endif
18534
18535 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18536 + __set_fs(current_thread_info()->addr_limit);
18537 +#endif
18538 +
18539 reload_tss(vcpu);
18540
18541 local_irq_disable();
18542 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18543 index 579a0b5..ed7bbf9 100644
18544 --- a/arch/x86/kvm/vmx.c
18545 +++ b/arch/x86/kvm/vmx.c
18546 @@ -1305,7 +1305,11 @@ static void reload_tss(void)
18547 struct desc_struct *descs;
18548
18549 descs = (void *)gdt->address;
18550 +
18551 + pax_open_kernel();
18552 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18553 + pax_close_kernel();
18554 +
18555 load_TR_desc();
18556 }
18557
18558 @@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
18559 if (!cpu_has_vmx_flexpriority())
18560 flexpriority_enabled = 0;
18561
18562 - if (!cpu_has_vmx_tpr_shadow())
18563 - kvm_x86_ops->update_cr8_intercept = NULL;
18564 + if (!cpu_has_vmx_tpr_shadow()) {
18565 + pax_open_kernel();
18566 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18567 + pax_close_kernel();
18568 + }
18569
18570 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18571 kvm_disable_largepages();
18572 @@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
18573 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18574
18575 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18576 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18577 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18578
18579 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18580 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18581 @@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18582 "jmp .Lkvm_vmx_return \n\t"
18583 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18584 ".Lkvm_vmx_return: "
18585 +
18586 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18587 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18588 + ".Lkvm_vmx_return2: "
18589 +#endif
18590 +
18591 /* Save guest registers, load host registers, keep flags */
18592 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18593 "pop %0 \n\t"
18594 @@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18595 #endif
18596 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18597 [wordsize]"i"(sizeof(ulong))
18598 +
18599 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18600 + ,[cs]"i"(__KERNEL_CS)
18601 +#endif
18602 +
18603 : "cc", "memory"
18604 , R"ax", R"bx", R"di", R"si"
18605 #ifdef CONFIG_X86_64
18606 @@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18607 }
18608 }
18609
18610 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18611 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18612 +
18613 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18614 + loadsegment(fs, __KERNEL_PERCPU);
18615 +#endif
18616 +
18617 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18618 + __set_fs(current_thread_info()->addr_limit);
18619 +#endif
18620 +
18621 vmx->loaded_vmcs->launched = 1;
18622
18623 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18624 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18625 index 4c938da..4ddef65 100644
18626 --- a/arch/x86/kvm/x86.c
18627 +++ b/arch/x86/kvm/x86.c
18628 @@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18629 {
18630 struct kvm *kvm = vcpu->kvm;
18631 int lm = is_long_mode(vcpu);
18632 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18633 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18634 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18635 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18636 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18637 : kvm->arch.xen_hvm_config.blob_size_32;
18638 u32 page_num = data & ~PAGE_MASK;
18639 @@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18640 if (n < msr_list.nmsrs)
18641 goto out;
18642 r = -EFAULT;
18643 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18644 + goto out;
18645 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18646 num_msrs_to_save * sizeof(u32)))
18647 goto out;
18648 @@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18649 struct kvm_cpuid2 *cpuid,
18650 struct kvm_cpuid_entry2 __user *entries)
18651 {
18652 - int r;
18653 + int r, i;
18654
18655 r = -E2BIG;
18656 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18657 goto out;
18658 r = -EFAULT;
18659 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18660 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18661 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18662 goto out;
18663 + for (i = 0; i < cpuid->nent; ++i) {
18664 + struct kvm_cpuid_entry2 cpuid_entry;
18665 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18666 + goto out;
18667 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18668 + }
18669 vcpu->arch.cpuid_nent = cpuid->nent;
18670 kvm_apic_set_version(vcpu);
18671 kvm_x86_ops->cpuid_update(vcpu);
18672 @@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18673 struct kvm_cpuid2 *cpuid,
18674 struct kvm_cpuid_entry2 __user *entries)
18675 {
18676 - int r;
18677 + int r, i;
18678
18679 r = -E2BIG;
18680 if (cpuid->nent < vcpu->arch.cpuid_nent)
18681 goto out;
18682 r = -EFAULT;
18683 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18684 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18685 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18686 goto out;
18687 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18688 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18689 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18690 + goto out;
18691 + }
18692 return 0;
18693
18694 out:
18695 @@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18696 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18697 struct kvm_interrupt *irq)
18698 {
18699 - if (irq->irq < 0 || irq->irq >= 256)
18700 + if (irq->irq >= 256)
18701 return -EINVAL;
18702 if (irqchip_in_kernel(vcpu->kvm))
18703 return -ENXIO;
18704 @@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
18705 kvm_mmu_set_mmio_spte_mask(mask);
18706 }
18707
18708 -int kvm_arch_init(void *opaque)
18709 +int kvm_arch_init(const void *opaque)
18710 {
18711 int r;
18712 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18713 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18714 index cf4603b..7cdde38 100644
18715 --- a/arch/x86/lguest/boot.c
18716 +++ b/arch/x86/lguest/boot.c
18717 @@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18718 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18719 * Launcher to reboot us.
18720 */
18721 -static void lguest_restart(char *reason)
18722 +static __noreturn void lguest_restart(char *reason)
18723 {
18724 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18725 + BUG();
18726 }
18727
18728 /*G:050
18729 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18730 index 042f682..c92afb6 100644
18731 --- a/arch/x86/lib/atomic64_32.c
18732 +++ b/arch/x86/lib/atomic64_32.c
18733 @@ -8,18 +8,30 @@
18734
18735 long long atomic64_read_cx8(long long, const atomic64_t *v);
18736 EXPORT_SYMBOL(atomic64_read_cx8);
18737 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18738 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18739 long long atomic64_set_cx8(long long, const atomic64_t *v);
18740 EXPORT_SYMBOL(atomic64_set_cx8);
18741 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18742 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18743 long long atomic64_xchg_cx8(long long, unsigned high);
18744 EXPORT_SYMBOL(atomic64_xchg_cx8);
18745 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18746 EXPORT_SYMBOL(atomic64_add_return_cx8);
18747 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18748 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18749 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18750 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18751 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18752 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18753 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18754 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18755 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18756 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18757 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18758 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18759 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18760 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18761 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18762 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18763 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18764 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18765 #ifndef CONFIG_X86_CMPXCHG64
18766 long long atomic64_read_386(long long, const atomic64_t *v);
18767 EXPORT_SYMBOL(atomic64_read_386);
18768 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18769 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
18770 long long atomic64_set_386(long long, const atomic64_t *v);
18771 EXPORT_SYMBOL(atomic64_set_386);
18772 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18773 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
18774 long long atomic64_xchg_386(long long, unsigned high);
18775 EXPORT_SYMBOL(atomic64_xchg_386);
18776 long long atomic64_add_return_386(long long a, atomic64_t *v);
18777 EXPORT_SYMBOL(atomic64_add_return_386);
18778 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18779 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18780 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18781 EXPORT_SYMBOL(atomic64_sub_return_386);
18782 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18783 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18784 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18785 EXPORT_SYMBOL(atomic64_inc_return_386);
18786 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18787 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18788 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18789 EXPORT_SYMBOL(atomic64_dec_return_386);
18790 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18791 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18792 long long atomic64_add_386(long long a, atomic64_t *v);
18793 EXPORT_SYMBOL(atomic64_add_386);
18794 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18795 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
18796 long long atomic64_sub_386(long long a, atomic64_t *v);
18797 EXPORT_SYMBOL(atomic64_sub_386);
18798 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18799 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18800 long long atomic64_inc_386(long long a, atomic64_t *v);
18801 EXPORT_SYMBOL(atomic64_inc_386);
18802 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18803 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18804 long long atomic64_dec_386(long long a, atomic64_t *v);
18805 EXPORT_SYMBOL(atomic64_dec_386);
18806 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18807 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18808 long long atomic64_dec_if_positive_386(atomic64_t *v);
18809 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18810 int atomic64_inc_not_zero_386(atomic64_t *v);
18811 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18812 index e8e7e0d..56fd1b0 100644
18813 --- a/arch/x86/lib/atomic64_386_32.S
18814 +++ b/arch/x86/lib/atomic64_386_32.S
18815 @@ -48,6 +48,10 @@ BEGIN(read)
18816 movl (v), %eax
18817 movl 4(v), %edx
18818 RET_ENDP
18819 +BEGIN(read_unchecked)
18820 + movl (v), %eax
18821 + movl 4(v), %edx
18822 +RET_ENDP
18823 #undef v
18824
18825 #define v %esi
18826 @@ -55,6 +59,10 @@ BEGIN(set)
18827 movl %ebx, (v)
18828 movl %ecx, 4(v)
18829 RET_ENDP
18830 +BEGIN(set_unchecked)
18831 + movl %ebx, (v)
18832 + movl %ecx, 4(v)
18833 +RET_ENDP
18834 #undef v
18835
18836 #define v %esi
18837 @@ -70,6 +78,20 @@ RET_ENDP
18838 BEGIN(add)
18839 addl %eax, (v)
18840 adcl %edx, 4(v)
18841 +
18842 +#ifdef CONFIG_PAX_REFCOUNT
18843 + jno 0f
18844 + subl %eax, (v)
18845 + sbbl %edx, 4(v)
18846 + int $4
18847 +0:
18848 + _ASM_EXTABLE(0b, 0b)
18849 +#endif
18850 +
18851 +RET_ENDP
18852 +BEGIN(add_unchecked)
18853 + addl %eax, (v)
18854 + adcl %edx, 4(v)
18855 RET_ENDP
18856 #undef v
18857
18858 @@ -77,6 +99,24 @@ RET_ENDP
18859 BEGIN(add_return)
18860 addl (v), %eax
18861 adcl 4(v), %edx
18862 +
18863 +#ifdef CONFIG_PAX_REFCOUNT
18864 + into
18865 +1234:
18866 + _ASM_EXTABLE(1234b, 2f)
18867 +#endif
18868 +
18869 + movl %eax, (v)
18870 + movl %edx, 4(v)
18871 +
18872 +#ifdef CONFIG_PAX_REFCOUNT
18873 +2:
18874 +#endif
18875 +
18876 +RET_ENDP
18877 +BEGIN(add_return_unchecked)
18878 + addl (v), %eax
18879 + adcl 4(v), %edx
18880 movl %eax, (v)
18881 movl %edx, 4(v)
18882 RET_ENDP
18883 @@ -86,6 +126,20 @@ RET_ENDP
18884 BEGIN(sub)
18885 subl %eax, (v)
18886 sbbl %edx, 4(v)
18887 +
18888 +#ifdef CONFIG_PAX_REFCOUNT
18889 + jno 0f
18890 + addl %eax, (v)
18891 + adcl %edx, 4(v)
18892 + int $4
18893 +0:
18894 + _ASM_EXTABLE(0b, 0b)
18895 +#endif
18896 +
18897 +RET_ENDP
18898 +BEGIN(sub_unchecked)
18899 + subl %eax, (v)
18900 + sbbl %edx, 4(v)
18901 RET_ENDP
18902 #undef v
18903
18904 @@ -96,6 +150,27 @@ BEGIN(sub_return)
18905 sbbl $0, %edx
18906 addl (v), %eax
18907 adcl 4(v), %edx
18908 +
18909 +#ifdef CONFIG_PAX_REFCOUNT
18910 + into
18911 +1234:
18912 + _ASM_EXTABLE(1234b, 2f)
18913 +#endif
18914 +
18915 + movl %eax, (v)
18916 + movl %edx, 4(v)
18917 +
18918 +#ifdef CONFIG_PAX_REFCOUNT
18919 +2:
18920 +#endif
18921 +
18922 +RET_ENDP
18923 +BEGIN(sub_return_unchecked)
18924 + negl %edx
18925 + negl %eax
18926 + sbbl $0, %edx
18927 + addl (v), %eax
18928 + adcl 4(v), %edx
18929 movl %eax, (v)
18930 movl %edx, 4(v)
18931 RET_ENDP
18932 @@ -105,6 +180,20 @@ RET_ENDP
18933 BEGIN(inc)
18934 addl $1, (v)
18935 adcl $0, 4(v)
18936 +
18937 +#ifdef CONFIG_PAX_REFCOUNT
18938 + jno 0f
18939 + subl $1, (v)
18940 + sbbl $0, 4(v)
18941 + int $4
18942 +0:
18943 + _ASM_EXTABLE(0b, 0b)
18944 +#endif
18945 +
18946 +RET_ENDP
18947 +BEGIN(inc_unchecked)
18948 + addl $1, (v)
18949 + adcl $0, 4(v)
18950 RET_ENDP
18951 #undef v
18952
18953 @@ -114,6 +203,26 @@ BEGIN(inc_return)
18954 movl 4(v), %edx
18955 addl $1, %eax
18956 adcl $0, %edx
18957 +
18958 +#ifdef CONFIG_PAX_REFCOUNT
18959 + into
18960 +1234:
18961 + _ASM_EXTABLE(1234b, 2f)
18962 +#endif
18963 +
18964 + movl %eax, (v)
18965 + movl %edx, 4(v)
18966 +
18967 +#ifdef CONFIG_PAX_REFCOUNT
18968 +2:
18969 +#endif
18970 +
18971 +RET_ENDP
18972 +BEGIN(inc_return_unchecked)
18973 + movl (v), %eax
18974 + movl 4(v), %edx
18975 + addl $1, %eax
18976 + adcl $0, %edx
18977 movl %eax, (v)
18978 movl %edx, 4(v)
18979 RET_ENDP
18980 @@ -123,6 +232,20 @@ RET_ENDP
18981 BEGIN(dec)
18982 subl $1, (v)
18983 sbbl $0, 4(v)
18984 +
18985 +#ifdef CONFIG_PAX_REFCOUNT
18986 + jno 0f
18987 + addl $1, (v)
18988 + adcl $0, 4(v)
18989 + int $4
18990 +0:
18991 + _ASM_EXTABLE(0b, 0b)
18992 +#endif
18993 +
18994 +RET_ENDP
18995 +BEGIN(dec_unchecked)
18996 + subl $1, (v)
18997 + sbbl $0, 4(v)
18998 RET_ENDP
18999 #undef v
19000
19001 @@ -132,6 +255,26 @@ BEGIN(dec_return)
19002 movl 4(v), %edx
19003 subl $1, %eax
19004 sbbl $0, %edx
19005 +
19006 +#ifdef CONFIG_PAX_REFCOUNT
19007 + into
19008 +1234:
19009 + _ASM_EXTABLE(1234b, 2f)
19010 +#endif
19011 +
19012 + movl %eax, (v)
19013 + movl %edx, 4(v)
19014 +
19015 +#ifdef CONFIG_PAX_REFCOUNT
19016 +2:
19017 +#endif
19018 +
19019 +RET_ENDP
19020 +BEGIN(dec_return_unchecked)
19021 + movl (v), %eax
19022 + movl 4(v), %edx
19023 + subl $1, %eax
19024 + sbbl $0, %edx
19025 movl %eax, (v)
19026 movl %edx, 4(v)
19027 RET_ENDP
19028 @@ -143,6 +286,13 @@ BEGIN(add_unless)
19029 adcl %edx, %edi
19030 addl (v), %eax
19031 adcl 4(v), %edx
19032 +
19033 +#ifdef CONFIG_PAX_REFCOUNT
19034 + into
19035 +1234:
19036 + _ASM_EXTABLE(1234b, 2f)
19037 +#endif
19038 +
19039 cmpl %eax, %esi
19040 je 3f
19041 1:
19042 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
19043 1:
19044 addl $1, %eax
19045 adcl $0, %edx
19046 +
19047 +#ifdef CONFIG_PAX_REFCOUNT
19048 + into
19049 +1234:
19050 + _ASM_EXTABLE(1234b, 2f)
19051 +#endif
19052 +
19053 movl %eax, (v)
19054 movl %edx, 4(v)
19055 movl $1, %eax
19056 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
19057 movl 4(v), %edx
19058 subl $1, %eax
19059 sbbl $0, %edx
19060 +
19061 +#ifdef CONFIG_PAX_REFCOUNT
19062 + into
19063 +1234:
19064 + _ASM_EXTABLE(1234b, 1f)
19065 +#endif
19066 +
19067 js 1f
19068 movl %eax, (v)
19069 movl %edx, 4(v)
19070 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
19071 index 391a083..d658e9f 100644
19072 --- a/arch/x86/lib/atomic64_cx8_32.S
19073 +++ b/arch/x86/lib/atomic64_cx8_32.S
19074 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
19075 CFI_STARTPROC
19076
19077 read64 %ecx
19078 + pax_force_retaddr
19079 ret
19080 CFI_ENDPROC
19081 ENDPROC(atomic64_read_cx8)
19082
19083 +ENTRY(atomic64_read_unchecked_cx8)
19084 + CFI_STARTPROC
19085 +
19086 + read64 %ecx
19087 + pax_force_retaddr
19088 + ret
19089 + CFI_ENDPROC
19090 +ENDPROC(atomic64_read_unchecked_cx8)
19091 +
19092 ENTRY(atomic64_set_cx8)
19093 CFI_STARTPROC
19094
19095 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
19096 cmpxchg8b (%esi)
19097 jne 1b
19098
19099 + pax_force_retaddr
19100 ret
19101 CFI_ENDPROC
19102 ENDPROC(atomic64_set_cx8)
19103
19104 +ENTRY(atomic64_set_unchecked_cx8)
19105 + CFI_STARTPROC
19106 +
19107 +1:
19108 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
19109 + * are atomic on 586 and newer */
19110 + cmpxchg8b (%esi)
19111 + jne 1b
19112 +
19113 + pax_force_retaddr
19114 + ret
19115 + CFI_ENDPROC
19116 +ENDPROC(atomic64_set_unchecked_cx8)
19117 +
19118 ENTRY(atomic64_xchg_cx8)
19119 CFI_STARTPROC
19120
19121 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
19122 cmpxchg8b (%esi)
19123 jne 1b
19124
19125 + pax_force_retaddr
19126 ret
19127 CFI_ENDPROC
19128 ENDPROC(atomic64_xchg_cx8)
19129
19130 -.macro addsub_return func ins insc
19131 -ENTRY(atomic64_\func\()_return_cx8)
19132 +.macro addsub_return func ins insc unchecked=""
19133 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
19134 CFI_STARTPROC
19135 SAVE ebp
19136 SAVE ebx
19137 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
19138 movl %edx, %ecx
19139 \ins\()l %esi, %ebx
19140 \insc\()l %edi, %ecx
19141 +
19142 +.ifb \unchecked
19143 +#ifdef CONFIG_PAX_REFCOUNT
19144 + into
19145 +2:
19146 + _ASM_EXTABLE(2b, 3f)
19147 +#endif
19148 +.endif
19149 +
19150 LOCK_PREFIX
19151 cmpxchg8b (%ebp)
19152 jne 1b
19153 -
19154 -10:
19155 movl %ebx, %eax
19156 movl %ecx, %edx
19157 +
19158 +.ifb \unchecked
19159 +#ifdef CONFIG_PAX_REFCOUNT
19160 +3:
19161 +#endif
19162 +.endif
19163 +
19164 RESTORE edi
19165 RESTORE esi
19166 RESTORE ebx
19167 RESTORE ebp
19168 + pax_force_retaddr
19169 ret
19170 CFI_ENDPROC
19171 -ENDPROC(atomic64_\func\()_return_cx8)
19172 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
19173 .endm
19174
19175 addsub_return add add adc
19176 addsub_return sub sub sbb
19177 +addsub_return add add adc _unchecked
19178 +addsub_return sub sub sbb _unchecked
19179
19180 -.macro incdec_return func ins insc
19181 -ENTRY(atomic64_\func\()_return_cx8)
19182 +.macro incdec_return func ins insc unchecked
19183 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
19184 CFI_STARTPROC
19185 SAVE ebx
19186
19187 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
19188 movl %edx, %ecx
19189 \ins\()l $1, %ebx
19190 \insc\()l $0, %ecx
19191 +
19192 +.ifb \unchecked
19193 +#ifdef CONFIG_PAX_REFCOUNT
19194 + into
19195 +2:
19196 + _ASM_EXTABLE(2b, 3f)
19197 +#endif
19198 +.endif
19199 +
19200 LOCK_PREFIX
19201 cmpxchg8b (%esi)
19202 jne 1b
19203
19204 -10:
19205 movl %ebx, %eax
19206 movl %ecx, %edx
19207 +
19208 +.ifb \unchecked
19209 +#ifdef CONFIG_PAX_REFCOUNT
19210 +3:
19211 +#endif
19212 +.endif
19213 +
19214 RESTORE ebx
19215 + pax_force_retaddr
19216 ret
19217 CFI_ENDPROC
19218 -ENDPROC(atomic64_\func\()_return_cx8)
19219 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
19220 .endm
19221
19222 incdec_return inc add adc
19223 incdec_return dec sub sbb
19224 +incdec_return inc add adc _unchecked
19225 +incdec_return dec sub sbb _unchecked
19226
19227 ENTRY(atomic64_dec_if_positive_cx8)
19228 CFI_STARTPROC
19229 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
19230 movl %edx, %ecx
19231 subl $1, %ebx
19232 sbb $0, %ecx
19233 +
19234 +#ifdef CONFIG_PAX_REFCOUNT
19235 + into
19236 +1234:
19237 + _ASM_EXTABLE(1234b, 2f)
19238 +#endif
19239 +
19240 js 2f
19241 LOCK_PREFIX
19242 cmpxchg8b (%esi)
19243 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
19244 movl %ebx, %eax
19245 movl %ecx, %edx
19246 RESTORE ebx
19247 + pax_force_retaddr
19248 ret
19249 CFI_ENDPROC
19250 ENDPROC(atomic64_dec_if_positive_cx8)
19251 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
19252 movl %edx, %ecx
19253 addl %esi, %ebx
19254 adcl %edi, %ecx
19255 +
19256 +#ifdef CONFIG_PAX_REFCOUNT
19257 + into
19258 +1234:
19259 + _ASM_EXTABLE(1234b, 3f)
19260 +#endif
19261 +
19262 LOCK_PREFIX
19263 cmpxchg8b (%ebp)
19264 jne 1b
19265 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
19266 CFI_ADJUST_CFA_OFFSET -8
19267 RESTORE ebx
19268 RESTORE ebp
19269 + pax_force_retaddr
19270 ret
19271 4:
19272 cmpl %edx, 4(%esp)
19273 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
19274 movl %edx, %ecx
19275 addl $1, %ebx
19276 adcl $0, %ecx
19277 +
19278 +#ifdef CONFIG_PAX_REFCOUNT
19279 + into
19280 +1234:
19281 + _ASM_EXTABLE(1234b, 3f)
19282 +#endif
19283 +
19284 LOCK_PREFIX
19285 cmpxchg8b (%esi)
19286 jne 1b
19287 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
19288 movl $1, %eax
19289 3:
19290 RESTORE ebx
19291 + pax_force_retaddr
19292 ret
19293 4:
19294 testl %edx, %edx
19295 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
19296 index 78d16a5..fbcf666 100644
19297 --- a/arch/x86/lib/checksum_32.S
19298 +++ b/arch/x86/lib/checksum_32.S
19299 @@ -28,7 +28,8 @@
19300 #include <linux/linkage.h>
19301 #include <asm/dwarf2.h>
19302 #include <asm/errno.h>
19303 -
19304 +#include <asm/segment.h>
19305 +
19306 /*
19307 * computes a partial checksum, e.g. for TCP/UDP fragments
19308 */
19309 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19310
19311 #define ARGBASE 16
19312 #define FP 12
19313 -
19314 -ENTRY(csum_partial_copy_generic)
19315 +
19316 +ENTRY(csum_partial_copy_generic_to_user)
19317 CFI_STARTPROC
19318 +
19319 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19320 + pushl_cfi %gs
19321 + popl_cfi %es
19322 + jmp csum_partial_copy_generic
19323 +#endif
19324 +
19325 +ENTRY(csum_partial_copy_generic_from_user)
19326 +
19327 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19328 + pushl_cfi %gs
19329 + popl_cfi %ds
19330 +#endif
19331 +
19332 +ENTRY(csum_partial_copy_generic)
19333 subl $4,%esp
19334 CFI_ADJUST_CFA_OFFSET 4
19335 pushl_cfi %edi
19336 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19337 jmp 4f
19338 SRC(1: movw (%esi), %bx )
19339 addl $2, %esi
19340 -DST( movw %bx, (%edi) )
19341 +DST( movw %bx, %es:(%edi) )
19342 addl $2, %edi
19343 addw %bx, %ax
19344 adcl $0, %eax
19345 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19346 SRC(1: movl (%esi), %ebx )
19347 SRC( movl 4(%esi), %edx )
19348 adcl %ebx, %eax
19349 -DST( movl %ebx, (%edi) )
19350 +DST( movl %ebx, %es:(%edi) )
19351 adcl %edx, %eax
19352 -DST( movl %edx, 4(%edi) )
19353 +DST( movl %edx, %es:4(%edi) )
19354
19355 SRC( movl 8(%esi), %ebx )
19356 SRC( movl 12(%esi), %edx )
19357 adcl %ebx, %eax
19358 -DST( movl %ebx, 8(%edi) )
19359 +DST( movl %ebx, %es:8(%edi) )
19360 adcl %edx, %eax
19361 -DST( movl %edx, 12(%edi) )
19362 +DST( movl %edx, %es:12(%edi) )
19363
19364 SRC( movl 16(%esi), %ebx )
19365 SRC( movl 20(%esi), %edx )
19366 adcl %ebx, %eax
19367 -DST( movl %ebx, 16(%edi) )
19368 +DST( movl %ebx, %es:16(%edi) )
19369 adcl %edx, %eax
19370 -DST( movl %edx, 20(%edi) )
19371 +DST( movl %edx, %es:20(%edi) )
19372
19373 SRC( movl 24(%esi), %ebx )
19374 SRC( movl 28(%esi), %edx )
19375 adcl %ebx, %eax
19376 -DST( movl %ebx, 24(%edi) )
19377 +DST( movl %ebx, %es:24(%edi) )
19378 adcl %edx, %eax
19379 -DST( movl %edx, 28(%edi) )
19380 +DST( movl %edx, %es:28(%edi) )
19381
19382 lea 32(%esi), %esi
19383 lea 32(%edi), %edi
19384 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19385 shrl $2, %edx # This clears CF
19386 SRC(3: movl (%esi), %ebx )
19387 adcl %ebx, %eax
19388 -DST( movl %ebx, (%edi) )
19389 +DST( movl %ebx, %es:(%edi) )
19390 lea 4(%esi), %esi
19391 lea 4(%edi), %edi
19392 dec %edx
19393 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19394 jb 5f
19395 SRC( movw (%esi), %cx )
19396 leal 2(%esi), %esi
19397 -DST( movw %cx, (%edi) )
19398 +DST( movw %cx, %es:(%edi) )
19399 leal 2(%edi), %edi
19400 je 6f
19401 shll $16,%ecx
19402 SRC(5: movb (%esi), %cl )
19403 -DST( movb %cl, (%edi) )
19404 +DST( movb %cl, %es:(%edi) )
19405 6: addl %ecx, %eax
19406 adcl $0, %eax
19407 7:
19408 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19409
19410 6001:
19411 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19412 - movl $-EFAULT, (%ebx)
19413 + movl $-EFAULT, %ss:(%ebx)
19414
19415 # zero the complete destination - computing the rest
19416 # is too much work
19417 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19418
19419 6002:
19420 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19421 - movl $-EFAULT,(%ebx)
19422 + movl $-EFAULT,%ss:(%ebx)
19423 jmp 5000b
19424
19425 .previous
19426
19427 + pushl_cfi %ss
19428 + popl_cfi %ds
19429 + pushl_cfi %ss
19430 + popl_cfi %es
19431 popl_cfi %ebx
19432 CFI_RESTORE ebx
19433 popl_cfi %esi
19434 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19435 popl_cfi %ecx # equivalent to addl $4,%esp
19436 ret
19437 CFI_ENDPROC
19438 -ENDPROC(csum_partial_copy_generic)
19439 +ENDPROC(csum_partial_copy_generic_to_user)
19440
19441 #else
19442
19443 /* Version for PentiumII/PPro */
19444
19445 #define ROUND1(x) \
19446 + nop; nop; nop; \
19447 SRC(movl x(%esi), %ebx ) ; \
19448 addl %ebx, %eax ; \
19449 - DST(movl %ebx, x(%edi) ) ;
19450 + DST(movl %ebx, %es:x(%edi)) ;
19451
19452 #define ROUND(x) \
19453 + nop; nop; nop; \
19454 SRC(movl x(%esi), %ebx ) ; \
19455 adcl %ebx, %eax ; \
19456 - DST(movl %ebx, x(%edi) ) ;
19457 + DST(movl %ebx, %es:x(%edi)) ;
19458
19459 #define ARGBASE 12
19460 -
19461 -ENTRY(csum_partial_copy_generic)
19462 +
19463 +ENTRY(csum_partial_copy_generic_to_user)
19464 CFI_STARTPROC
19465 +
19466 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19467 + pushl_cfi %gs
19468 + popl_cfi %es
19469 + jmp csum_partial_copy_generic
19470 +#endif
19471 +
19472 +ENTRY(csum_partial_copy_generic_from_user)
19473 +
19474 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19475 + pushl_cfi %gs
19476 + popl_cfi %ds
19477 +#endif
19478 +
19479 +ENTRY(csum_partial_copy_generic)
19480 pushl_cfi %ebx
19481 CFI_REL_OFFSET ebx, 0
19482 pushl_cfi %edi
19483 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19484 subl %ebx, %edi
19485 lea -1(%esi),%edx
19486 andl $-32,%edx
19487 - lea 3f(%ebx,%ebx), %ebx
19488 + lea 3f(%ebx,%ebx,2), %ebx
19489 testl %esi, %esi
19490 jmp *%ebx
19491 1: addl $64,%esi
19492 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19493 jb 5f
19494 SRC( movw (%esi), %dx )
19495 leal 2(%esi), %esi
19496 -DST( movw %dx, (%edi) )
19497 +DST( movw %dx, %es:(%edi) )
19498 leal 2(%edi), %edi
19499 je 6f
19500 shll $16,%edx
19501 5:
19502 SRC( movb (%esi), %dl )
19503 -DST( movb %dl, (%edi) )
19504 +DST( movb %dl, %es:(%edi) )
19505 6: addl %edx, %eax
19506 adcl $0, %eax
19507 7:
19508 .section .fixup, "ax"
19509 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19510 - movl $-EFAULT, (%ebx)
19511 + movl $-EFAULT, %ss:(%ebx)
19512 # zero the complete destination (computing the rest is too much work)
19513 movl ARGBASE+8(%esp),%edi # dst
19514 movl ARGBASE+12(%esp),%ecx # len
19515 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19516 rep; stosb
19517 jmp 7b
19518 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19519 - movl $-EFAULT, (%ebx)
19520 + movl $-EFAULT, %ss:(%ebx)
19521 jmp 7b
19522 .previous
19523
19524 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19525 + pushl_cfi %ss
19526 + popl_cfi %ds
19527 + pushl_cfi %ss
19528 + popl_cfi %es
19529 +#endif
19530 +
19531 popl_cfi %esi
19532 CFI_RESTORE esi
19533 popl_cfi %edi
19534 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19535 CFI_RESTORE ebx
19536 ret
19537 CFI_ENDPROC
19538 -ENDPROC(csum_partial_copy_generic)
19539 +ENDPROC(csum_partial_copy_generic_to_user)
19540
19541 #undef ROUND
19542 #undef ROUND1
19543 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19544 index f2145cf..cea889d 100644
19545 --- a/arch/x86/lib/clear_page_64.S
19546 +++ b/arch/x86/lib/clear_page_64.S
19547 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19548 movl $4096/8,%ecx
19549 xorl %eax,%eax
19550 rep stosq
19551 + pax_force_retaddr
19552 ret
19553 CFI_ENDPROC
19554 ENDPROC(clear_page_c)
19555 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19556 movl $4096,%ecx
19557 xorl %eax,%eax
19558 rep stosb
19559 + pax_force_retaddr
19560 ret
19561 CFI_ENDPROC
19562 ENDPROC(clear_page_c_e)
19563 @@ -43,6 +45,7 @@ ENTRY(clear_page)
19564 leaq 64(%rdi),%rdi
19565 jnz .Lloop
19566 nop
19567 + pax_force_retaddr
19568 ret
19569 CFI_ENDPROC
19570 .Lclear_page_end:
19571 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
19572
19573 #include <asm/cpufeature.h>
19574
19575 - .section .altinstr_replacement,"ax"
19576 + .section .altinstr_replacement,"a"
19577 1: .byte 0xeb /* jmp <disp8> */
19578 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19579 2: .byte 0xeb /* jmp <disp8> */
19580 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19581 index 1e572c5..2a162cd 100644
19582 --- a/arch/x86/lib/cmpxchg16b_emu.S
19583 +++ b/arch/x86/lib/cmpxchg16b_emu.S
19584 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19585
19586 popf
19587 mov $1, %al
19588 + pax_force_retaddr
19589 ret
19590
19591 not_same:
19592 popf
19593 xor %al,%al
19594 + pax_force_retaddr
19595 ret
19596
19597 CFI_ENDPROC
19598 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19599 index 01c805b..dccb07f 100644
19600 --- a/arch/x86/lib/copy_page_64.S
19601 +++ b/arch/x86/lib/copy_page_64.S
19602 @@ -9,6 +9,7 @@ copy_page_c:
19603 CFI_STARTPROC
19604 movl $4096/8,%ecx
19605 rep movsq
19606 + pax_force_retaddr
19607 ret
19608 CFI_ENDPROC
19609 ENDPROC(copy_page_c)
19610 @@ -39,7 +40,7 @@ ENTRY(copy_page)
19611 movq 16 (%rsi), %rdx
19612 movq 24 (%rsi), %r8
19613 movq 32 (%rsi), %r9
19614 - movq 40 (%rsi), %r10
19615 + movq 40 (%rsi), %r13
19616 movq 48 (%rsi), %r11
19617 movq 56 (%rsi), %r12
19618
19619 @@ -50,7 +51,7 @@ ENTRY(copy_page)
19620 movq %rdx, 16 (%rdi)
19621 movq %r8, 24 (%rdi)
19622 movq %r9, 32 (%rdi)
19623 - movq %r10, 40 (%rdi)
19624 + movq %r13, 40 (%rdi)
19625 movq %r11, 48 (%rdi)
19626 movq %r12, 56 (%rdi)
19627
19628 @@ -69,7 +70,7 @@ ENTRY(copy_page)
19629 movq 16 (%rsi), %rdx
19630 movq 24 (%rsi), %r8
19631 movq 32 (%rsi), %r9
19632 - movq 40 (%rsi), %r10
19633 + movq 40 (%rsi), %r13
19634 movq 48 (%rsi), %r11
19635 movq 56 (%rsi), %r12
19636
19637 @@ -78,7 +79,7 @@ ENTRY(copy_page)
19638 movq %rdx, 16 (%rdi)
19639 movq %r8, 24 (%rdi)
19640 movq %r9, 32 (%rdi)
19641 - movq %r10, 40 (%rdi)
19642 + movq %r13, 40 (%rdi)
19643 movq %r11, 48 (%rdi)
19644 movq %r12, 56 (%rdi)
19645
19646 @@ -95,6 +96,7 @@ ENTRY(copy_page)
19647 CFI_RESTORE r13
19648 addq $3*8,%rsp
19649 CFI_ADJUST_CFA_OFFSET -3*8
19650 + pax_force_retaddr
19651 ret
19652 .Lcopy_page_end:
19653 CFI_ENDPROC
19654 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
19655
19656 #include <asm/cpufeature.h>
19657
19658 - .section .altinstr_replacement,"ax"
19659 + .section .altinstr_replacement,"a"
19660 1: .byte 0xeb /* jmp <disp8> */
19661 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19662 2:
19663 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19664 index 0248402..821c786 100644
19665 --- a/arch/x86/lib/copy_user_64.S
19666 +++ b/arch/x86/lib/copy_user_64.S
19667 @@ -16,6 +16,7 @@
19668 #include <asm/thread_info.h>
19669 #include <asm/cpufeature.h>
19670 #include <asm/alternative-asm.h>
19671 +#include <asm/pgtable.h>
19672
19673 /*
19674 * By placing feature2 after feature1 in altinstructions section, we logically
19675 @@ -29,7 +30,7 @@
19676 .byte 0xe9 /* 32bit jump */
19677 .long \orig-1f /* by default jump to orig */
19678 1:
19679 - .section .altinstr_replacement,"ax"
19680 + .section .altinstr_replacement,"a"
19681 2: .byte 0xe9 /* near jump with 32bit immediate */
19682 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19683 3: .byte 0xe9 /* near jump with 32bit immediate */
19684 @@ -71,47 +72,20 @@
19685 #endif
19686 .endm
19687
19688 -/* Standard copy_to_user with segment limit checking */
19689 -ENTRY(_copy_to_user)
19690 - CFI_STARTPROC
19691 - GET_THREAD_INFO(%rax)
19692 - movq %rdi,%rcx
19693 - addq %rdx,%rcx
19694 - jc bad_to_user
19695 - cmpq TI_addr_limit(%rax),%rcx
19696 - ja bad_to_user
19697 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19698 - copy_user_generic_unrolled,copy_user_generic_string, \
19699 - copy_user_enhanced_fast_string
19700 - CFI_ENDPROC
19701 -ENDPROC(_copy_to_user)
19702 -
19703 -/* Standard copy_from_user with segment limit checking */
19704 -ENTRY(_copy_from_user)
19705 - CFI_STARTPROC
19706 - GET_THREAD_INFO(%rax)
19707 - movq %rsi,%rcx
19708 - addq %rdx,%rcx
19709 - jc bad_from_user
19710 - cmpq TI_addr_limit(%rax),%rcx
19711 - ja bad_from_user
19712 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19713 - copy_user_generic_unrolled,copy_user_generic_string, \
19714 - copy_user_enhanced_fast_string
19715 - CFI_ENDPROC
19716 -ENDPROC(_copy_from_user)
19717 -
19718 .section .fixup,"ax"
19719 /* must zero dest */
19720 ENTRY(bad_from_user)
19721 bad_from_user:
19722 CFI_STARTPROC
19723 + testl %edx,%edx
19724 + js bad_to_user
19725 movl %edx,%ecx
19726 xorl %eax,%eax
19727 rep
19728 stosb
19729 bad_to_user:
19730 movl %edx,%eax
19731 + pax_force_retaddr
19732 ret
19733 CFI_ENDPROC
19734 ENDPROC(bad_from_user)
19735 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19736 jz 17f
19737 1: movq (%rsi),%r8
19738 2: movq 1*8(%rsi),%r9
19739 -3: movq 2*8(%rsi),%r10
19740 +3: movq 2*8(%rsi),%rax
19741 4: movq 3*8(%rsi),%r11
19742 5: movq %r8,(%rdi)
19743 6: movq %r9,1*8(%rdi)
19744 -7: movq %r10,2*8(%rdi)
19745 +7: movq %rax,2*8(%rdi)
19746 8: movq %r11,3*8(%rdi)
19747 9: movq 4*8(%rsi),%r8
19748 10: movq 5*8(%rsi),%r9
19749 -11: movq 6*8(%rsi),%r10
19750 +11: movq 6*8(%rsi),%rax
19751 12: movq 7*8(%rsi),%r11
19752 13: movq %r8,4*8(%rdi)
19753 14: movq %r9,5*8(%rdi)
19754 -15: movq %r10,6*8(%rdi)
19755 +15: movq %rax,6*8(%rdi)
19756 16: movq %r11,7*8(%rdi)
19757 leaq 64(%rsi),%rsi
19758 leaq 64(%rdi),%rdi
19759 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19760 decl %ecx
19761 jnz 21b
19762 23: xor %eax,%eax
19763 + pax_force_retaddr
19764 ret
19765
19766 .section .fixup,"ax"
19767 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19768 3: rep
19769 movsb
19770 4: xorl %eax,%eax
19771 + pax_force_retaddr
19772 ret
19773
19774 .section .fixup,"ax"
19775 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19776 1: rep
19777 movsb
19778 2: xorl %eax,%eax
19779 + pax_force_retaddr
19780 ret
19781
19782 .section .fixup,"ax"
19783 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19784 index cb0c112..e3a6895 100644
19785 --- a/arch/x86/lib/copy_user_nocache_64.S
19786 +++ b/arch/x86/lib/copy_user_nocache_64.S
19787 @@ -8,12 +8,14 @@
19788
19789 #include <linux/linkage.h>
19790 #include <asm/dwarf2.h>
19791 +#include <asm/alternative-asm.h>
19792
19793 #define FIX_ALIGNMENT 1
19794
19795 #include <asm/current.h>
19796 #include <asm/asm-offsets.h>
19797 #include <asm/thread_info.h>
19798 +#include <asm/pgtable.h>
19799
19800 .macro ALIGN_DESTINATION
19801 #ifdef FIX_ALIGNMENT
19802 @@ -50,6 +52,15 @@
19803 */
19804 ENTRY(__copy_user_nocache)
19805 CFI_STARTPROC
19806 +
19807 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19808 + mov $PAX_USER_SHADOW_BASE,%rcx
19809 + cmp %rcx,%rsi
19810 + jae 1f
19811 + add %rcx,%rsi
19812 +1:
19813 +#endif
19814 +
19815 cmpl $8,%edx
19816 jb 20f /* less then 8 bytes, go to byte copy loop */
19817 ALIGN_DESTINATION
19818 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19819 jz 17f
19820 1: movq (%rsi),%r8
19821 2: movq 1*8(%rsi),%r9
19822 -3: movq 2*8(%rsi),%r10
19823 +3: movq 2*8(%rsi),%rax
19824 4: movq 3*8(%rsi),%r11
19825 5: movnti %r8,(%rdi)
19826 6: movnti %r9,1*8(%rdi)
19827 -7: movnti %r10,2*8(%rdi)
19828 +7: movnti %rax,2*8(%rdi)
19829 8: movnti %r11,3*8(%rdi)
19830 9: movq 4*8(%rsi),%r8
19831 10: movq 5*8(%rsi),%r9
19832 -11: movq 6*8(%rsi),%r10
19833 +11: movq 6*8(%rsi),%rax
19834 12: movq 7*8(%rsi),%r11
19835 13: movnti %r8,4*8(%rdi)
19836 14: movnti %r9,5*8(%rdi)
19837 -15: movnti %r10,6*8(%rdi)
19838 +15: movnti %rax,6*8(%rdi)
19839 16: movnti %r11,7*8(%rdi)
19840 leaq 64(%rsi),%rsi
19841 leaq 64(%rdi),%rdi
19842 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19843 jnz 21b
19844 23: xorl %eax,%eax
19845 sfence
19846 + pax_force_retaddr
19847 ret
19848
19849 .section .fixup,"ax"
19850 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19851 index fb903b7..c92b7f7 100644
19852 --- a/arch/x86/lib/csum-copy_64.S
19853 +++ b/arch/x86/lib/csum-copy_64.S
19854 @@ -8,6 +8,7 @@
19855 #include <linux/linkage.h>
19856 #include <asm/dwarf2.h>
19857 #include <asm/errno.h>
19858 +#include <asm/alternative-asm.h>
19859
19860 /*
19861 * Checksum copy with exception handling.
19862 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19863 CFI_RESTORE rbp
19864 addq $7*8, %rsp
19865 CFI_ADJUST_CFA_OFFSET -7*8
19866 + pax_force_retaddr 0, 1
19867 ret
19868 CFI_RESTORE_STATE
19869
19870 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19871 index 459b58a..9570bc7 100644
19872 --- a/arch/x86/lib/csum-wrappers_64.c
19873 +++ b/arch/x86/lib/csum-wrappers_64.c
19874 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19875 len -= 2;
19876 }
19877 }
19878 - isum = csum_partial_copy_generic((__force const void *)src,
19879 +
19880 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19881 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19882 + src += PAX_USER_SHADOW_BASE;
19883 +#endif
19884 +
19885 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
19886 dst, len, isum, errp, NULL);
19887 if (unlikely(*errp))
19888 goto out_err;
19889 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19890 }
19891
19892 *errp = 0;
19893 - return csum_partial_copy_generic(src, (void __force *)dst,
19894 +
19895 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19896 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19897 + dst += PAX_USER_SHADOW_BASE;
19898 +#endif
19899 +
19900 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19901 len, isum, NULL, errp);
19902 }
19903 EXPORT_SYMBOL(csum_partial_copy_to_user);
19904 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19905 index 51f1504..ddac4c1 100644
19906 --- a/arch/x86/lib/getuser.S
19907 +++ b/arch/x86/lib/getuser.S
19908 @@ -33,15 +33,38 @@
19909 #include <asm/asm-offsets.h>
19910 #include <asm/thread_info.h>
19911 #include <asm/asm.h>
19912 +#include <asm/segment.h>
19913 +#include <asm/pgtable.h>
19914 +#include <asm/alternative-asm.h>
19915 +
19916 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19917 +#define __copyuser_seg gs;
19918 +#else
19919 +#define __copyuser_seg
19920 +#endif
19921
19922 .text
19923 ENTRY(__get_user_1)
19924 CFI_STARTPROC
19925 +
19926 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19927 GET_THREAD_INFO(%_ASM_DX)
19928 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19929 jae bad_get_user
19930 -1: movzb (%_ASM_AX),%edx
19931 +
19932 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19933 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19934 + cmp %_ASM_DX,%_ASM_AX
19935 + jae 1234f
19936 + add %_ASM_DX,%_ASM_AX
19937 +1234:
19938 +#endif
19939 +
19940 +#endif
19941 +
19942 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19943 xor %eax,%eax
19944 + pax_force_retaddr
19945 ret
19946 CFI_ENDPROC
19947 ENDPROC(__get_user_1)
19948 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19949 ENTRY(__get_user_2)
19950 CFI_STARTPROC
19951 add $1,%_ASM_AX
19952 +
19953 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19954 jc bad_get_user
19955 GET_THREAD_INFO(%_ASM_DX)
19956 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19957 jae bad_get_user
19958 -2: movzwl -1(%_ASM_AX),%edx
19959 +
19960 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19961 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19962 + cmp %_ASM_DX,%_ASM_AX
19963 + jae 1234f
19964 + add %_ASM_DX,%_ASM_AX
19965 +1234:
19966 +#endif
19967 +
19968 +#endif
19969 +
19970 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19971 xor %eax,%eax
19972 + pax_force_retaddr
19973 ret
19974 CFI_ENDPROC
19975 ENDPROC(__get_user_2)
19976 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19977 ENTRY(__get_user_4)
19978 CFI_STARTPROC
19979 add $3,%_ASM_AX
19980 +
19981 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19982 jc bad_get_user
19983 GET_THREAD_INFO(%_ASM_DX)
19984 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19985 jae bad_get_user
19986 -3: mov -3(%_ASM_AX),%edx
19987 +
19988 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19989 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19990 + cmp %_ASM_DX,%_ASM_AX
19991 + jae 1234f
19992 + add %_ASM_DX,%_ASM_AX
19993 +1234:
19994 +#endif
19995 +
19996 +#endif
19997 +
19998 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19999 xor %eax,%eax
20000 + pax_force_retaddr
20001 ret
20002 CFI_ENDPROC
20003 ENDPROC(__get_user_4)
20004 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
20005 GET_THREAD_INFO(%_ASM_DX)
20006 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
20007 jae bad_get_user
20008 +
20009 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20010 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
20011 + cmp %_ASM_DX,%_ASM_AX
20012 + jae 1234f
20013 + add %_ASM_DX,%_ASM_AX
20014 +1234:
20015 +#endif
20016 +
20017 4: movq -7(%_ASM_AX),%_ASM_DX
20018 xor %eax,%eax
20019 + pax_force_retaddr
20020 ret
20021 CFI_ENDPROC
20022 ENDPROC(__get_user_8)
20023 @@ -91,6 +152,7 @@ bad_get_user:
20024 CFI_STARTPROC
20025 xor %edx,%edx
20026 mov $(-EFAULT),%_ASM_AX
20027 + pax_force_retaddr
20028 ret
20029 CFI_ENDPROC
20030 END(bad_get_user)
20031 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
20032 index 374562e..a75830b 100644
20033 --- a/arch/x86/lib/insn.c
20034 +++ b/arch/x86/lib/insn.c
20035 @@ -21,6 +21,11 @@
20036 #include <linux/string.h>
20037 #include <asm/inat.h>
20038 #include <asm/insn.h>
20039 +#ifdef __KERNEL__
20040 +#include <asm/pgtable_types.h>
20041 +#else
20042 +#define ktla_ktva(addr) addr
20043 +#endif
20044
20045 /* Verify next sizeof(t) bytes can be on the same instruction */
20046 #define validate_next(t, insn, n) \
20047 @@ -49,8 +54,8 @@
20048 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
20049 {
20050 memset(insn, 0, sizeof(*insn));
20051 - insn->kaddr = kaddr;
20052 - insn->next_byte = kaddr;
20053 + insn->kaddr = ktla_ktva(kaddr);
20054 + insn->next_byte = ktla_ktva(kaddr);
20055 insn->x86_64 = x86_64 ? 1 : 0;
20056 insn->opnd_bytes = 4;
20057 if (x86_64)
20058 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
20059 index 05a95e7..326f2fa 100644
20060 --- a/arch/x86/lib/iomap_copy_64.S
20061 +++ b/arch/x86/lib/iomap_copy_64.S
20062 @@ -17,6 +17,7 @@
20063
20064 #include <linux/linkage.h>
20065 #include <asm/dwarf2.h>
20066 +#include <asm/alternative-asm.h>
20067
20068 /*
20069 * override generic version in lib/iomap_copy.c
20070 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
20071 CFI_STARTPROC
20072 movl %edx,%ecx
20073 rep movsd
20074 + pax_force_retaddr
20075 ret
20076 CFI_ENDPROC
20077 ENDPROC(__iowrite32_copy)
20078 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
20079 index efbf2a0..8893637 100644
20080 --- a/arch/x86/lib/memcpy_64.S
20081 +++ b/arch/x86/lib/memcpy_64.S
20082 @@ -34,6 +34,7 @@
20083 rep movsq
20084 movl %edx, %ecx
20085 rep movsb
20086 + pax_force_retaddr
20087 ret
20088 .Lmemcpy_e:
20089 .previous
20090 @@ -51,6 +52,7 @@
20091
20092 movl %edx, %ecx
20093 rep movsb
20094 + pax_force_retaddr
20095 ret
20096 .Lmemcpy_e_e:
20097 .previous
20098 @@ -81,13 +83,13 @@ ENTRY(memcpy)
20099 */
20100 movq 0*8(%rsi), %r8
20101 movq 1*8(%rsi), %r9
20102 - movq 2*8(%rsi), %r10
20103 + movq 2*8(%rsi), %rcx
20104 movq 3*8(%rsi), %r11
20105 leaq 4*8(%rsi), %rsi
20106
20107 movq %r8, 0*8(%rdi)
20108 movq %r9, 1*8(%rdi)
20109 - movq %r10, 2*8(%rdi)
20110 + movq %rcx, 2*8(%rdi)
20111 movq %r11, 3*8(%rdi)
20112 leaq 4*8(%rdi), %rdi
20113 jae .Lcopy_forward_loop
20114 @@ -110,12 +112,12 @@ ENTRY(memcpy)
20115 subq $0x20, %rdx
20116 movq -1*8(%rsi), %r8
20117 movq -2*8(%rsi), %r9
20118 - movq -3*8(%rsi), %r10
20119 + movq -3*8(%rsi), %rcx
20120 movq -4*8(%rsi), %r11
20121 leaq -4*8(%rsi), %rsi
20122 movq %r8, -1*8(%rdi)
20123 movq %r9, -2*8(%rdi)
20124 - movq %r10, -3*8(%rdi)
20125 + movq %rcx, -3*8(%rdi)
20126 movq %r11, -4*8(%rdi)
20127 leaq -4*8(%rdi), %rdi
20128 jae .Lcopy_backward_loop
20129 @@ -135,12 +137,13 @@ ENTRY(memcpy)
20130 */
20131 movq 0*8(%rsi), %r8
20132 movq 1*8(%rsi), %r9
20133 - movq -2*8(%rsi, %rdx), %r10
20134 + movq -2*8(%rsi, %rdx), %rcx
20135 movq -1*8(%rsi, %rdx), %r11
20136 movq %r8, 0*8(%rdi)
20137 movq %r9, 1*8(%rdi)
20138 - movq %r10, -2*8(%rdi, %rdx)
20139 + movq %rcx, -2*8(%rdi, %rdx)
20140 movq %r11, -1*8(%rdi, %rdx)
20141 + pax_force_retaddr
20142 retq
20143 .p2align 4
20144 .Lless_16bytes:
20145 @@ -153,6 +156,7 @@ ENTRY(memcpy)
20146 movq -1*8(%rsi, %rdx), %r9
20147 movq %r8, 0*8(%rdi)
20148 movq %r9, -1*8(%rdi, %rdx)
20149 + pax_force_retaddr
20150 retq
20151 .p2align 4
20152 .Lless_8bytes:
20153 @@ -166,6 +170,7 @@ ENTRY(memcpy)
20154 movl -4(%rsi, %rdx), %r8d
20155 movl %ecx, (%rdi)
20156 movl %r8d, -4(%rdi, %rdx)
20157 + pax_force_retaddr
20158 retq
20159 .p2align 4
20160 .Lless_3bytes:
20161 @@ -183,6 +188,7 @@ ENTRY(memcpy)
20162 jnz .Lloop_1
20163
20164 .Lend:
20165 + pax_force_retaddr
20166 retq
20167 CFI_ENDPROC
20168 ENDPROC(memcpy)
20169 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
20170 index ee16461..c39c199 100644
20171 --- a/arch/x86/lib/memmove_64.S
20172 +++ b/arch/x86/lib/memmove_64.S
20173 @@ -61,13 +61,13 @@ ENTRY(memmove)
20174 5:
20175 sub $0x20, %rdx
20176 movq 0*8(%rsi), %r11
20177 - movq 1*8(%rsi), %r10
20178 + movq 1*8(%rsi), %rcx
20179 movq 2*8(%rsi), %r9
20180 movq 3*8(%rsi), %r8
20181 leaq 4*8(%rsi), %rsi
20182
20183 movq %r11, 0*8(%rdi)
20184 - movq %r10, 1*8(%rdi)
20185 + movq %rcx, 1*8(%rdi)
20186 movq %r9, 2*8(%rdi)
20187 movq %r8, 3*8(%rdi)
20188 leaq 4*8(%rdi), %rdi
20189 @@ -81,10 +81,10 @@ ENTRY(memmove)
20190 4:
20191 movq %rdx, %rcx
20192 movq -8(%rsi, %rdx), %r11
20193 - lea -8(%rdi, %rdx), %r10
20194 + lea -8(%rdi, %rdx), %r9
20195 shrq $3, %rcx
20196 rep movsq
20197 - movq %r11, (%r10)
20198 + movq %r11, (%r9)
20199 jmp 13f
20200 .Lmemmove_end_forward:
20201
20202 @@ -95,14 +95,14 @@ ENTRY(memmove)
20203 7:
20204 movq %rdx, %rcx
20205 movq (%rsi), %r11
20206 - movq %rdi, %r10
20207 + movq %rdi, %r9
20208 leaq -8(%rsi, %rdx), %rsi
20209 leaq -8(%rdi, %rdx), %rdi
20210 shrq $3, %rcx
20211 std
20212 rep movsq
20213 cld
20214 - movq %r11, (%r10)
20215 + movq %r11, (%r9)
20216 jmp 13f
20217
20218 /*
20219 @@ -127,13 +127,13 @@ ENTRY(memmove)
20220 8:
20221 subq $0x20, %rdx
20222 movq -1*8(%rsi), %r11
20223 - movq -2*8(%rsi), %r10
20224 + movq -2*8(%rsi), %rcx
20225 movq -3*8(%rsi), %r9
20226 movq -4*8(%rsi), %r8
20227 leaq -4*8(%rsi), %rsi
20228
20229 movq %r11, -1*8(%rdi)
20230 - movq %r10, -2*8(%rdi)
20231 + movq %rcx, -2*8(%rdi)
20232 movq %r9, -3*8(%rdi)
20233 movq %r8, -4*8(%rdi)
20234 leaq -4*8(%rdi), %rdi
20235 @@ -151,11 +151,11 @@ ENTRY(memmove)
20236 * Move data from 16 bytes to 31 bytes.
20237 */
20238 movq 0*8(%rsi), %r11
20239 - movq 1*8(%rsi), %r10
20240 + movq 1*8(%rsi), %rcx
20241 movq -2*8(%rsi, %rdx), %r9
20242 movq -1*8(%rsi, %rdx), %r8
20243 movq %r11, 0*8(%rdi)
20244 - movq %r10, 1*8(%rdi)
20245 + movq %rcx, 1*8(%rdi)
20246 movq %r9, -2*8(%rdi, %rdx)
20247 movq %r8, -1*8(%rdi, %rdx)
20248 jmp 13f
20249 @@ -167,9 +167,9 @@ ENTRY(memmove)
20250 * Move data from 8 bytes to 15 bytes.
20251 */
20252 movq 0*8(%rsi), %r11
20253 - movq -1*8(%rsi, %rdx), %r10
20254 + movq -1*8(%rsi, %rdx), %r9
20255 movq %r11, 0*8(%rdi)
20256 - movq %r10, -1*8(%rdi, %rdx)
20257 + movq %r9, -1*8(%rdi, %rdx)
20258 jmp 13f
20259 10:
20260 cmpq $4, %rdx
20261 @@ -178,9 +178,9 @@ ENTRY(memmove)
20262 * Move data from 4 bytes to 7 bytes.
20263 */
20264 movl (%rsi), %r11d
20265 - movl -4(%rsi, %rdx), %r10d
20266 + movl -4(%rsi, %rdx), %r9d
20267 movl %r11d, (%rdi)
20268 - movl %r10d, -4(%rdi, %rdx)
20269 + movl %r9d, -4(%rdi, %rdx)
20270 jmp 13f
20271 11:
20272 cmp $2, %rdx
20273 @@ -189,9 +189,9 @@ ENTRY(memmove)
20274 * Move data from 2 bytes to 3 bytes.
20275 */
20276 movw (%rsi), %r11w
20277 - movw -2(%rsi, %rdx), %r10w
20278 + movw -2(%rsi, %rdx), %r9w
20279 movw %r11w, (%rdi)
20280 - movw %r10w, -2(%rdi, %rdx)
20281 + movw %r9w, -2(%rdi, %rdx)
20282 jmp 13f
20283 12:
20284 cmp $1, %rdx
20285 @@ -202,6 +202,7 @@ ENTRY(memmove)
20286 movb (%rsi), %r11b
20287 movb %r11b, (%rdi)
20288 13:
20289 + pax_force_retaddr
20290 retq
20291 CFI_ENDPROC
20292
20293 @@ -210,6 +211,7 @@ ENTRY(memmove)
20294 /* Forward moving data. */
20295 movq %rdx, %rcx
20296 rep movsb
20297 + pax_force_retaddr
20298 retq
20299 .Lmemmove_end_forward_efs:
20300 .previous
20301 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20302 index 79bd454..dff325a 100644
20303 --- a/arch/x86/lib/memset_64.S
20304 +++ b/arch/x86/lib/memset_64.S
20305 @@ -31,6 +31,7 @@
20306 movl %r8d,%ecx
20307 rep stosb
20308 movq %r9,%rax
20309 + pax_force_retaddr
20310 ret
20311 .Lmemset_e:
20312 .previous
20313 @@ -53,6 +54,7 @@
20314 movl %edx,%ecx
20315 rep stosb
20316 movq %r9,%rax
20317 + pax_force_retaddr
20318 ret
20319 .Lmemset_e_e:
20320 .previous
20321 @@ -60,13 +62,13 @@
20322 ENTRY(memset)
20323 ENTRY(__memset)
20324 CFI_STARTPROC
20325 - movq %rdi,%r10
20326 movq %rdx,%r11
20327
20328 /* expand byte value */
20329 movzbl %sil,%ecx
20330 movabs $0x0101010101010101,%rax
20331 mul %rcx /* with rax, clobbers rdx */
20332 + movq %rdi,%rdx
20333
20334 /* align dst */
20335 movl %edi,%r9d
20336 @@ -120,7 +122,8 @@ ENTRY(__memset)
20337 jnz .Lloop_1
20338
20339 .Lende:
20340 - movq %r10,%rax
20341 + movq %rdx,%rax
20342 + pax_force_retaddr
20343 ret
20344
20345 CFI_RESTORE_STATE
20346 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20347 index c9f2d9b..e7fd2c0 100644
20348 --- a/arch/x86/lib/mmx_32.c
20349 +++ b/arch/x86/lib/mmx_32.c
20350 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20351 {
20352 void *p;
20353 int i;
20354 + unsigned long cr0;
20355
20356 if (unlikely(in_interrupt()))
20357 return __memcpy(to, from, len);
20358 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20359 kernel_fpu_begin();
20360
20361 __asm__ __volatile__ (
20362 - "1: prefetch (%0)\n" /* This set is 28 bytes */
20363 - " prefetch 64(%0)\n"
20364 - " prefetch 128(%0)\n"
20365 - " prefetch 192(%0)\n"
20366 - " prefetch 256(%0)\n"
20367 + "1: prefetch (%1)\n" /* This set is 28 bytes */
20368 + " prefetch 64(%1)\n"
20369 + " prefetch 128(%1)\n"
20370 + " prefetch 192(%1)\n"
20371 + " prefetch 256(%1)\n"
20372 "2: \n"
20373 ".section .fixup, \"ax\"\n"
20374 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20375 + "3: \n"
20376 +
20377 +#ifdef CONFIG_PAX_KERNEXEC
20378 + " movl %%cr0, %0\n"
20379 + " movl %0, %%eax\n"
20380 + " andl $0xFFFEFFFF, %%eax\n"
20381 + " movl %%eax, %%cr0\n"
20382 +#endif
20383 +
20384 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20385 +
20386 +#ifdef CONFIG_PAX_KERNEXEC
20387 + " movl %0, %%cr0\n"
20388 +#endif
20389 +
20390 " jmp 2b\n"
20391 ".previous\n"
20392 _ASM_EXTABLE(1b, 3b)
20393 - : : "r" (from));
20394 + : "=&r" (cr0) : "r" (from) : "ax");
20395
20396 for ( ; i > 5; i--) {
20397 __asm__ __volatile__ (
20398 - "1: prefetch 320(%0)\n"
20399 - "2: movq (%0), %%mm0\n"
20400 - " movq 8(%0), %%mm1\n"
20401 - " movq 16(%0), %%mm2\n"
20402 - " movq 24(%0), %%mm3\n"
20403 - " movq %%mm0, (%1)\n"
20404 - " movq %%mm1, 8(%1)\n"
20405 - " movq %%mm2, 16(%1)\n"
20406 - " movq %%mm3, 24(%1)\n"
20407 - " movq 32(%0), %%mm0\n"
20408 - " movq 40(%0), %%mm1\n"
20409 - " movq 48(%0), %%mm2\n"
20410 - " movq 56(%0), %%mm3\n"
20411 - " movq %%mm0, 32(%1)\n"
20412 - " movq %%mm1, 40(%1)\n"
20413 - " movq %%mm2, 48(%1)\n"
20414 - " movq %%mm3, 56(%1)\n"
20415 + "1: prefetch 320(%1)\n"
20416 + "2: movq (%1), %%mm0\n"
20417 + " movq 8(%1), %%mm1\n"
20418 + " movq 16(%1), %%mm2\n"
20419 + " movq 24(%1), %%mm3\n"
20420 + " movq %%mm0, (%2)\n"
20421 + " movq %%mm1, 8(%2)\n"
20422 + " movq %%mm2, 16(%2)\n"
20423 + " movq %%mm3, 24(%2)\n"
20424 + " movq 32(%1), %%mm0\n"
20425 + " movq 40(%1), %%mm1\n"
20426 + " movq 48(%1), %%mm2\n"
20427 + " movq 56(%1), %%mm3\n"
20428 + " movq %%mm0, 32(%2)\n"
20429 + " movq %%mm1, 40(%2)\n"
20430 + " movq %%mm2, 48(%2)\n"
20431 + " movq %%mm3, 56(%2)\n"
20432 ".section .fixup, \"ax\"\n"
20433 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20434 + "3:\n"
20435 +
20436 +#ifdef CONFIG_PAX_KERNEXEC
20437 + " movl %%cr0, %0\n"
20438 + " movl %0, %%eax\n"
20439 + " andl $0xFFFEFFFF, %%eax\n"
20440 + " movl %%eax, %%cr0\n"
20441 +#endif
20442 +
20443 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20444 +
20445 +#ifdef CONFIG_PAX_KERNEXEC
20446 + " movl %0, %%cr0\n"
20447 +#endif
20448 +
20449 " jmp 2b\n"
20450 ".previous\n"
20451 _ASM_EXTABLE(1b, 3b)
20452 - : : "r" (from), "r" (to) : "memory");
20453 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20454
20455 from += 64;
20456 to += 64;
20457 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20458 static void fast_copy_page(void *to, void *from)
20459 {
20460 int i;
20461 + unsigned long cr0;
20462
20463 kernel_fpu_begin();
20464
20465 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20466 * but that is for later. -AV
20467 */
20468 __asm__ __volatile__(
20469 - "1: prefetch (%0)\n"
20470 - " prefetch 64(%0)\n"
20471 - " prefetch 128(%0)\n"
20472 - " prefetch 192(%0)\n"
20473 - " prefetch 256(%0)\n"
20474 + "1: prefetch (%1)\n"
20475 + " prefetch 64(%1)\n"
20476 + " prefetch 128(%1)\n"
20477 + " prefetch 192(%1)\n"
20478 + " prefetch 256(%1)\n"
20479 "2: \n"
20480 ".section .fixup, \"ax\"\n"
20481 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20482 + "3: \n"
20483 +
20484 +#ifdef CONFIG_PAX_KERNEXEC
20485 + " movl %%cr0, %0\n"
20486 + " movl %0, %%eax\n"
20487 + " andl $0xFFFEFFFF, %%eax\n"
20488 + " movl %%eax, %%cr0\n"
20489 +#endif
20490 +
20491 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20492 +
20493 +#ifdef CONFIG_PAX_KERNEXEC
20494 + " movl %0, %%cr0\n"
20495 +#endif
20496 +
20497 " jmp 2b\n"
20498 ".previous\n"
20499 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20500 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20501
20502 for (i = 0; i < (4096-320)/64; i++) {
20503 __asm__ __volatile__ (
20504 - "1: prefetch 320(%0)\n"
20505 - "2: movq (%0), %%mm0\n"
20506 - " movntq %%mm0, (%1)\n"
20507 - " movq 8(%0), %%mm1\n"
20508 - " movntq %%mm1, 8(%1)\n"
20509 - " movq 16(%0), %%mm2\n"
20510 - " movntq %%mm2, 16(%1)\n"
20511 - " movq 24(%0), %%mm3\n"
20512 - " movntq %%mm3, 24(%1)\n"
20513 - " movq 32(%0), %%mm4\n"
20514 - " movntq %%mm4, 32(%1)\n"
20515 - " movq 40(%0), %%mm5\n"
20516 - " movntq %%mm5, 40(%1)\n"
20517 - " movq 48(%0), %%mm6\n"
20518 - " movntq %%mm6, 48(%1)\n"
20519 - " movq 56(%0), %%mm7\n"
20520 - " movntq %%mm7, 56(%1)\n"
20521 + "1: prefetch 320(%1)\n"
20522 + "2: movq (%1), %%mm0\n"
20523 + " movntq %%mm0, (%2)\n"
20524 + " movq 8(%1), %%mm1\n"
20525 + " movntq %%mm1, 8(%2)\n"
20526 + " movq 16(%1), %%mm2\n"
20527 + " movntq %%mm2, 16(%2)\n"
20528 + " movq 24(%1), %%mm3\n"
20529 + " movntq %%mm3, 24(%2)\n"
20530 + " movq 32(%1), %%mm4\n"
20531 + " movntq %%mm4, 32(%2)\n"
20532 + " movq 40(%1), %%mm5\n"
20533 + " movntq %%mm5, 40(%2)\n"
20534 + " movq 48(%1), %%mm6\n"
20535 + " movntq %%mm6, 48(%2)\n"
20536 + " movq 56(%1), %%mm7\n"
20537 + " movntq %%mm7, 56(%2)\n"
20538 ".section .fixup, \"ax\"\n"
20539 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20540 + "3:\n"
20541 +
20542 +#ifdef CONFIG_PAX_KERNEXEC
20543 + " movl %%cr0, %0\n"
20544 + " movl %0, %%eax\n"
20545 + " andl $0xFFFEFFFF, %%eax\n"
20546 + " movl %%eax, %%cr0\n"
20547 +#endif
20548 +
20549 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20550 +
20551 +#ifdef CONFIG_PAX_KERNEXEC
20552 + " movl %0, %%cr0\n"
20553 +#endif
20554 +
20555 " jmp 2b\n"
20556 ".previous\n"
20557 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20558 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20559
20560 from += 64;
20561 to += 64;
20562 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20563 static void fast_copy_page(void *to, void *from)
20564 {
20565 int i;
20566 + unsigned long cr0;
20567
20568 kernel_fpu_begin();
20569
20570 __asm__ __volatile__ (
20571 - "1: prefetch (%0)\n"
20572 - " prefetch 64(%0)\n"
20573 - " prefetch 128(%0)\n"
20574 - " prefetch 192(%0)\n"
20575 - " prefetch 256(%0)\n"
20576 + "1: prefetch (%1)\n"
20577 + " prefetch 64(%1)\n"
20578 + " prefetch 128(%1)\n"
20579 + " prefetch 192(%1)\n"
20580 + " prefetch 256(%1)\n"
20581 "2: \n"
20582 ".section .fixup, \"ax\"\n"
20583 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20584 + "3: \n"
20585 +
20586 +#ifdef CONFIG_PAX_KERNEXEC
20587 + " movl %%cr0, %0\n"
20588 + " movl %0, %%eax\n"
20589 + " andl $0xFFFEFFFF, %%eax\n"
20590 + " movl %%eax, %%cr0\n"
20591 +#endif
20592 +
20593 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20594 +
20595 +#ifdef CONFIG_PAX_KERNEXEC
20596 + " movl %0, %%cr0\n"
20597 +#endif
20598 +
20599 " jmp 2b\n"
20600 ".previous\n"
20601 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20602 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20603
20604 for (i = 0; i < 4096/64; i++) {
20605 __asm__ __volatile__ (
20606 - "1: prefetch 320(%0)\n"
20607 - "2: movq (%0), %%mm0\n"
20608 - " movq 8(%0), %%mm1\n"
20609 - " movq 16(%0), %%mm2\n"
20610 - " movq 24(%0), %%mm3\n"
20611 - " movq %%mm0, (%1)\n"
20612 - " movq %%mm1, 8(%1)\n"
20613 - " movq %%mm2, 16(%1)\n"
20614 - " movq %%mm3, 24(%1)\n"
20615 - " movq 32(%0), %%mm0\n"
20616 - " movq 40(%0), %%mm1\n"
20617 - " movq 48(%0), %%mm2\n"
20618 - " movq 56(%0), %%mm3\n"
20619 - " movq %%mm0, 32(%1)\n"
20620 - " movq %%mm1, 40(%1)\n"
20621 - " movq %%mm2, 48(%1)\n"
20622 - " movq %%mm3, 56(%1)\n"
20623 + "1: prefetch 320(%1)\n"
20624 + "2: movq (%1), %%mm0\n"
20625 + " movq 8(%1), %%mm1\n"
20626 + " movq 16(%1), %%mm2\n"
20627 + " movq 24(%1), %%mm3\n"
20628 + " movq %%mm0, (%2)\n"
20629 + " movq %%mm1, 8(%2)\n"
20630 + " movq %%mm2, 16(%2)\n"
20631 + " movq %%mm3, 24(%2)\n"
20632 + " movq 32(%1), %%mm0\n"
20633 + " movq 40(%1), %%mm1\n"
20634 + " movq 48(%1), %%mm2\n"
20635 + " movq 56(%1), %%mm3\n"
20636 + " movq %%mm0, 32(%2)\n"
20637 + " movq %%mm1, 40(%2)\n"
20638 + " movq %%mm2, 48(%2)\n"
20639 + " movq %%mm3, 56(%2)\n"
20640 ".section .fixup, \"ax\"\n"
20641 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20642 + "3:\n"
20643 +
20644 +#ifdef CONFIG_PAX_KERNEXEC
20645 + " movl %%cr0, %0\n"
20646 + " movl %0, %%eax\n"
20647 + " andl $0xFFFEFFFF, %%eax\n"
20648 + " movl %%eax, %%cr0\n"
20649 +#endif
20650 +
20651 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20652 +
20653 +#ifdef CONFIG_PAX_KERNEXEC
20654 + " movl %0, %%cr0\n"
20655 +#endif
20656 +
20657 " jmp 2b\n"
20658 ".previous\n"
20659 _ASM_EXTABLE(1b, 3b)
20660 - : : "r" (from), "r" (to) : "memory");
20661 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20662
20663 from += 64;
20664 to += 64;
20665 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20666 index 69fa106..adda88b 100644
20667 --- a/arch/x86/lib/msr-reg.S
20668 +++ b/arch/x86/lib/msr-reg.S
20669 @@ -3,6 +3,7 @@
20670 #include <asm/dwarf2.h>
20671 #include <asm/asm.h>
20672 #include <asm/msr.h>
20673 +#include <asm/alternative-asm.h>
20674
20675 #ifdef CONFIG_X86_64
20676 /*
20677 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20678 CFI_STARTPROC
20679 pushq_cfi %rbx
20680 pushq_cfi %rbp
20681 - movq %rdi, %r10 /* Save pointer */
20682 + movq %rdi, %r9 /* Save pointer */
20683 xorl %r11d, %r11d /* Return value */
20684 movl (%rdi), %eax
20685 movl 4(%rdi), %ecx
20686 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20687 movl 28(%rdi), %edi
20688 CFI_REMEMBER_STATE
20689 1: \op
20690 -2: movl %eax, (%r10)
20691 +2: movl %eax, (%r9)
20692 movl %r11d, %eax /* Return value */
20693 - movl %ecx, 4(%r10)
20694 - movl %edx, 8(%r10)
20695 - movl %ebx, 12(%r10)
20696 - movl %ebp, 20(%r10)
20697 - movl %esi, 24(%r10)
20698 - movl %edi, 28(%r10)
20699 + movl %ecx, 4(%r9)
20700 + movl %edx, 8(%r9)
20701 + movl %ebx, 12(%r9)
20702 + movl %ebp, 20(%r9)
20703 + movl %esi, 24(%r9)
20704 + movl %edi, 28(%r9)
20705 popq_cfi %rbp
20706 popq_cfi %rbx
20707 + pax_force_retaddr
20708 ret
20709 3:
20710 CFI_RESTORE_STATE
20711 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20712 index 36b0d15..d381858 100644
20713 --- a/arch/x86/lib/putuser.S
20714 +++ b/arch/x86/lib/putuser.S
20715 @@ -15,7 +15,9 @@
20716 #include <asm/thread_info.h>
20717 #include <asm/errno.h>
20718 #include <asm/asm.h>
20719 -
20720 +#include <asm/segment.h>
20721 +#include <asm/pgtable.h>
20722 +#include <asm/alternative-asm.h>
20723
20724 /*
20725 * __put_user_X
20726 @@ -29,52 +31,119 @@
20727 * as they get called from within inline assembly.
20728 */
20729
20730 -#define ENTER CFI_STARTPROC ; \
20731 - GET_THREAD_INFO(%_ASM_BX)
20732 -#define EXIT ret ; \
20733 +#define ENTER CFI_STARTPROC
20734 +#define EXIT pax_force_retaddr; ret ; \
20735 CFI_ENDPROC
20736
20737 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20738 +#define _DEST %_ASM_CX,%_ASM_BX
20739 +#else
20740 +#define _DEST %_ASM_CX
20741 +#endif
20742 +
20743 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20744 +#define __copyuser_seg gs;
20745 +#else
20746 +#define __copyuser_seg
20747 +#endif
20748 +
20749 .text
20750 ENTRY(__put_user_1)
20751 ENTER
20752 +
20753 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20754 + GET_THREAD_INFO(%_ASM_BX)
20755 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20756 jae bad_put_user
20757 -1: movb %al,(%_ASM_CX)
20758 +
20759 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20760 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20761 + cmp %_ASM_BX,%_ASM_CX
20762 + jb 1234f
20763 + xor %ebx,%ebx
20764 +1234:
20765 +#endif
20766 +
20767 +#endif
20768 +
20769 +1: __copyuser_seg movb %al,(_DEST)
20770 xor %eax,%eax
20771 EXIT
20772 ENDPROC(__put_user_1)
20773
20774 ENTRY(__put_user_2)
20775 ENTER
20776 +
20777 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20778 + GET_THREAD_INFO(%_ASM_BX)
20779 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20780 sub $1,%_ASM_BX
20781 cmp %_ASM_BX,%_ASM_CX
20782 jae bad_put_user
20783 -2: movw %ax,(%_ASM_CX)
20784 +
20785 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20786 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20787 + cmp %_ASM_BX,%_ASM_CX
20788 + jb 1234f
20789 + xor %ebx,%ebx
20790 +1234:
20791 +#endif
20792 +
20793 +#endif
20794 +
20795 +2: __copyuser_seg movw %ax,(_DEST)
20796 xor %eax,%eax
20797 EXIT
20798 ENDPROC(__put_user_2)
20799
20800 ENTRY(__put_user_4)
20801 ENTER
20802 +
20803 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20804 + GET_THREAD_INFO(%_ASM_BX)
20805 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20806 sub $3,%_ASM_BX
20807 cmp %_ASM_BX,%_ASM_CX
20808 jae bad_put_user
20809 -3: movl %eax,(%_ASM_CX)
20810 +
20811 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20812 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20813 + cmp %_ASM_BX,%_ASM_CX
20814 + jb 1234f
20815 + xor %ebx,%ebx
20816 +1234:
20817 +#endif
20818 +
20819 +#endif
20820 +
20821 +3: __copyuser_seg movl %eax,(_DEST)
20822 xor %eax,%eax
20823 EXIT
20824 ENDPROC(__put_user_4)
20825
20826 ENTRY(__put_user_8)
20827 ENTER
20828 +
20829 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20830 + GET_THREAD_INFO(%_ASM_BX)
20831 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20832 sub $7,%_ASM_BX
20833 cmp %_ASM_BX,%_ASM_CX
20834 jae bad_put_user
20835 -4: mov %_ASM_AX,(%_ASM_CX)
20836 +
20837 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20838 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20839 + cmp %_ASM_BX,%_ASM_CX
20840 + jb 1234f
20841 + xor %ebx,%ebx
20842 +1234:
20843 +#endif
20844 +
20845 +#endif
20846 +
20847 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
20848 #ifdef CONFIG_X86_32
20849 -5: movl %edx,4(%_ASM_CX)
20850 +5: __copyuser_seg movl %edx,4(_DEST)
20851 #endif
20852 xor %eax,%eax
20853 EXIT
20854 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20855 index 1cad221..de671ee 100644
20856 --- a/arch/x86/lib/rwlock.S
20857 +++ b/arch/x86/lib/rwlock.S
20858 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20859 FRAME
20860 0: LOCK_PREFIX
20861 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20862 +
20863 +#ifdef CONFIG_PAX_REFCOUNT
20864 + jno 1234f
20865 + LOCK_PREFIX
20866 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20867 + int $4
20868 +1234:
20869 + _ASM_EXTABLE(1234b, 1234b)
20870 +#endif
20871 +
20872 1: rep; nop
20873 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20874 jne 1b
20875 LOCK_PREFIX
20876 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20877 +
20878 +#ifdef CONFIG_PAX_REFCOUNT
20879 + jno 1234f
20880 + LOCK_PREFIX
20881 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20882 + int $4
20883 +1234:
20884 + _ASM_EXTABLE(1234b, 1234b)
20885 +#endif
20886 +
20887 jnz 0b
20888 ENDFRAME
20889 + pax_force_retaddr
20890 ret
20891 CFI_ENDPROC
20892 END(__write_lock_failed)
20893 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20894 FRAME
20895 0: LOCK_PREFIX
20896 READ_LOCK_SIZE(inc) (%__lock_ptr)
20897 +
20898 +#ifdef CONFIG_PAX_REFCOUNT
20899 + jno 1234f
20900 + LOCK_PREFIX
20901 + READ_LOCK_SIZE(dec) (%__lock_ptr)
20902 + int $4
20903 +1234:
20904 + _ASM_EXTABLE(1234b, 1234b)
20905 +#endif
20906 +
20907 1: rep; nop
20908 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20909 js 1b
20910 LOCK_PREFIX
20911 READ_LOCK_SIZE(dec) (%__lock_ptr)
20912 +
20913 +#ifdef CONFIG_PAX_REFCOUNT
20914 + jno 1234f
20915 + LOCK_PREFIX
20916 + READ_LOCK_SIZE(inc) (%__lock_ptr)
20917 + int $4
20918 +1234:
20919 + _ASM_EXTABLE(1234b, 1234b)
20920 +#endif
20921 +
20922 js 0b
20923 ENDFRAME
20924 + pax_force_retaddr
20925 ret
20926 CFI_ENDPROC
20927 END(__read_lock_failed)
20928 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20929 index 5dff5f0..cadebf4 100644
20930 --- a/arch/x86/lib/rwsem.S
20931 +++ b/arch/x86/lib/rwsem.S
20932 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20933 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20934 CFI_RESTORE __ASM_REG(dx)
20935 restore_common_regs
20936 + pax_force_retaddr
20937 ret
20938 CFI_ENDPROC
20939 ENDPROC(call_rwsem_down_read_failed)
20940 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20941 movq %rax,%rdi
20942 call rwsem_down_write_failed
20943 restore_common_regs
20944 + pax_force_retaddr
20945 ret
20946 CFI_ENDPROC
20947 ENDPROC(call_rwsem_down_write_failed)
20948 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20949 movq %rax,%rdi
20950 call rwsem_wake
20951 restore_common_regs
20952 -1: ret
20953 +1: pax_force_retaddr
20954 + ret
20955 CFI_ENDPROC
20956 ENDPROC(call_rwsem_wake)
20957
20958 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20959 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20960 CFI_RESTORE __ASM_REG(dx)
20961 restore_common_regs
20962 + pax_force_retaddr
20963 ret
20964 CFI_ENDPROC
20965 ENDPROC(call_rwsem_downgrade_wake)
20966 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20967 index a63efd6..ccecad8 100644
20968 --- a/arch/x86/lib/thunk_64.S
20969 +++ b/arch/x86/lib/thunk_64.S
20970 @@ -8,6 +8,7 @@
20971 #include <linux/linkage.h>
20972 #include <asm/dwarf2.h>
20973 #include <asm/calling.h>
20974 +#include <asm/alternative-asm.h>
20975
20976 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20977 .macro THUNK name, func, put_ret_addr_in_rdi=0
20978 @@ -41,5 +42,6 @@
20979 SAVE_ARGS
20980 restore:
20981 RESTORE_ARGS
20982 + pax_force_retaddr
20983 ret
20984 CFI_ENDPROC
20985 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20986 index e218d5d..35679b4 100644
20987 --- a/arch/x86/lib/usercopy_32.c
20988 +++ b/arch/x86/lib/usercopy_32.c
20989 @@ -43,7 +43,7 @@ do { \
20990 __asm__ __volatile__( \
20991 " testl %1,%1\n" \
20992 " jz 2f\n" \
20993 - "0: lodsb\n" \
20994 + "0: "__copyuser_seg"lodsb\n" \
20995 " stosb\n" \
20996 " testb %%al,%%al\n" \
20997 " jz 1f\n" \
20998 @@ -128,10 +128,12 @@ do { \
20999 int __d0; \
21000 might_fault(); \
21001 __asm__ __volatile__( \
21002 + __COPYUSER_SET_ES \
21003 "0: rep; stosl\n" \
21004 " movl %2,%0\n" \
21005 "1: rep; stosb\n" \
21006 "2:\n" \
21007 + __COPYUSER_RESTORE_ES \
21008 ".section .fixup,\"ax\"\n" \
21009 "3: lea 0(%2,%0,4),%0\n" \
21010 " jmp 2b\n" \
21011 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
21012 might_fault();
21013
21014 __asm__ __volatile__(
21015 + __COPYUSER_SET_ES
21016 " testl %0, %0\n"
21017 " jz 3f\n"
21018 " andl %0,%%ecx\n"
21019 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
21020 " subl %%ecx,%0\n"
21021 " addl %0,%%eax\n"
21022 "1:\n"
21023 + __COPYUSER_RESTORE_ES
21024 ".section .fixup,\"ax\"\n"
21025 "2: xorl %%eax,%%eax\n"
21026 " jmp 1b\n"
21027 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
21028
21029 #ifdef CONFIG_X86_INTEL_USERCOPY
21030 static unsigned long
21031 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
21032 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
21033 {
21034 int d0, d1;
21035 __asm__ __volatile__(
21036 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
21037 " .align 2,0x90\n"
21038 "3: movl 0(%4), %%eax\n"
21039 "4: movl 4(%4), %%edx\n"
21040 - "5: movl %%eax, 0(%3)\n"
21041 - "6: movl %%edx, 4(%3)\n"
21042 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
21043 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
21044 "7: movl 8(%4), %%eax\n"
21045 "8: movl 12(%4),%%edx\n"
21046 - "9: movl %%eax, 8(%3)\n"
21047 - "10: movl %%edx, 12(%3)\n"
21048 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
21049 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
21050 "11: movl 16(%4), %%eax\n"
21051 "12: movl 20(%4), %%edx\n"
21052 - "13: movl %%eax, 16(%3)\n"
21053 - "14: movl %%edx, 20(%3)\n"
21054 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
21055 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
21056 "15: movl 24(%4), %%eax\n"
21057 "16: movl 28(%4), %%edx\n"
21058 - "17: movl %%eax, 24(%3)\n"
21059 - "18: movl %%edx, 28(%3)\n"
21060 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
21061 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
21062 "19: movl 32(%4), %%eax\n"
21063 "20: movl 36(%4), %%edx\n"
21064 - "21: movl %%eax, 32(%3)\n"
21065 - "22: movl %%edx, 36(%3)\n"
21066 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
21067 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
21068 "23: movl 40(%4), %%eax\n"
21069 "24: movl 44(%4), %%edx\n"
21070 - "25: movl %%eax, 40(%3)\n"
21071 - "26: movl %%edx, 44(%3)\n"
21072 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
21073 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
21074 "27: movl 48(%4), %%eax\n"
21075 "28: movl 52(%4), %%edx\n"
21076 - "29: movl %%eax, 48(%3)\n"
21077 - "30: movl %%edx, 52(%3)\n"
21078 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
21079 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
21080 "31: movl 56(%4), %%eax\n"
21081 "32: movl 60(%4), %%edx\n"
21082 - "33: movl %%eax, 56(%3)\n"
21083 - "34: movl %%edx, 60(%3)\n"
21084 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
21085 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
21086 " addl $-64, %0\n"
21087 " addl $64, %4\n"
21088 " addl $64, %3\n"
21089 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
21090 " shrl $2, %0\n"
21091 " andl $3, %%eax\n"
21092 " cld\n"
21093 + __COPYUSER_SET_ES
21094 "99: rep; movsl\n"
21095 "36: movl %%eax, %0\n"
21096 "37: rep; movsb\n"
21097 "100:\n"
21098 + __COPYUSER_RESTORE_ES
21099 + ".section .fixup,\"ax\"\n"
21100 + "101: lea 0(%%eax,%0,4),%0\n"
21101 + " jmp 100b\n"
21102 + ".previous\n"
21103 + ".section __ex_table,\"a\"\n"
21104 + " .align 4\n"
21105 + " .long 1b,100b\n"
21106 + " .long 2b,100b\n"
21107 + " .long 3b,100b\n"
21108 + " .long 4b,100b\n"
21109 + " .long 5b,100b\n"
21110 + " .long 6b,100b\n"
21111 + " .long 7b,100b\n"
21112 + " .long 8b,100b\n"
21113 + " .long 9b,100b\n"
21114 + " .long 10b,100b\n"
21115 + " .long 11b,100b\n"
21116 + " .long 12b,100b\n"
21117 + " .long 13b,100b\n"
21118 + " .long 14b,100b\n"
21119 + " .long 15b,100b\n"
21120 + " .long 16b,100b\n"
21121 + " .long 17b,100b\n"
21122 + " .long 18b,100b\n"
21123 + " .long 19b,100b\n"
21124 + " .long 20b,100b\n"
21125 + " .long 21b,100b\n"
21126 + " .long 22b,100b\n"
21127 + " .long 23b,100b\n"
21128 + " .long 24b,100b\n"
21129 + " .long 25b,100b\n"
21130 + " .long 26b,100b\n"
21131 + " .long 27b,100b\n"
21132 + " .long 28b,100b\n"
21133 + " .long 29b,100b\n"
21134 + " .long 30b,100b\n"
21135 + " .long 31b,100b\n"
21136 + " .long 32b,100b\n"
21137 + " .long 33b,100b\n"
21138 + " .long 34b,100b\n"
21139 + " .long 35b,100b\n"
21140 + " .long 36b,100b\n"
21141 + " .long 37b,100b\n"
21142 + " .long 99b,101b\n"
21143 + ".previous"
21144 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
21145 + : "1"(to), "2"(from), "0"(size)
21146 + : "eax", "edx", "memory");
21147 + return size;
21148 +}
21149 +
21150 +static unsigned long
21151 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
21152 +{
21153 + int d0, d1;
21154 + __asm__ __volatile__(
21155 + " .align 2,0x90\n"
21156 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
21157 + " cmpl $67, %0\n"
21158 + " jbe 3f\n"
21159 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
21160 + " .align 2,0x90\n"
21161 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
21162 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
21163 + "5: movl %%eax, 0(%3)\n"
21164 + "6: movl %%edx, 4(%3)\n"
21165 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
21166 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
21167 + "9: movl %%eax, 8(%3)\n"
21168 + "10: movl %%edx, 12(%3)\n"
21169 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
21170 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
21171 + "13: movl %%eax, 16(%3)\n"
21172 + "14: movl %%edx, 20(%3)\n"
21173 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
21174 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
21175 + "17: movl %%eax, 24(%3)\n"
21176 + "18: movl %%edx, 28(%3)\n"
21177 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
21178 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
21179 + "21: movl %%eax, 32(%3)\n"
21180 + "22: movl %%edx, 36(%3)\n"
21181 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
21182 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
21183 + "25: movl %%eax, 40(%3)\n"
21184 + "26: movl %%edx, 44(%3)\n"
21185 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
21186 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
21187 + "29: movl %%eax, 48(%3)\n"
21188 + "30: movl %%edx, 52(%3)\n"
21189 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
21190 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
21191 + "33: movl %%eax, 56(%3)\n"
21192 + "34: movl %%edx, 60(%3)\n"
21193 + " addl $-64, %0\n"
21194 + " addl $64, %4\n"
21195 + " addl $64, %3\n"
21196 + " cmpl $63, %0\n"
21197 + " ja 1b\n"
21198 + "35: movl %0, %%eax\n"
21199 + " shrl $2, %0\n"
21200 + " andl $3, %%eax\n"
21201 + " cld\n"
21202 + "99: rep; "__copyuser_seg" movsl\n"
21203 + "36: movl %%eax, %0\n"
21204 + "37: rep; "__copyuser_seg" movsb\n"
21205 + "100:\n"
21206 ".section .fixup,\"ax\"\n"
21207 "101: lea 0(%%eax,%0,4),%0\n"
21208 " jmp 100b\n"
21209 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21210 int d0, d1;
21211 __asm__ __volatile__(
21212 " .align 2,0x90\n"
21213 - "0: movl 32(%4), %%eax\n"
21214 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21215 " cmpl $67, %0\n"
21216 " jbe 2f\n"
21217 - "1: movl 64(%4), %%eax\n"
21218 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21219 " .align 2,0x90\n"
21220 - "2: movl 0(%4), %%eax\n"
21221 - "21: movl 4(%4), %%edx\n"
21222 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21223 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21224 " movl %%eax, 0(%3)\n"
21225 " movl %%edx, 4(%3)\n"
21226 - "3: movl 8(%4), %%eax\n"
21227 - "31: movl 12(%4),%%edx\n"
21228 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21229 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21230 " movl %%eax, 8(%3)\n"
21231 " movl %%edx, 12(%3)\n"
21232 - "4: movl 16(%4), %%eax\n"
21233 - "41: movl 20(%4), %%edx\n"
21234 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21235 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21236 " movl %%eax, 16(%3)\n"
21237 " movl %%edx, 20(%3)\n"
21238 - "10: movl 24(%4), %%eax\n"
21239 - "51: movl 28(%4), %%edx\n"
21240 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21241 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21242 " movl %%eax, 24(%3)\n"
21243 " movl %%edx, 28(%3)\n"
21244 - "11: movl 32(%4), %%eax\n"
21245 - "61: movl 36(%4), %%edx\n"
21246 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21247 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21248 " movl %%eax, 32(%3)\n"
21249 " movl %%edx, 36(%3)\n"
21250 - "12: movl 40(%4), %%eax\n"
21251 - "71: movl 44(%4), %%edx\n"
21252 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21253 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21254 " movl %%eax, 40(%3)\n"
21255 " movl %%edx, 44(%3)\n"
21256 - "13: movl 48(%4), %%eax\n"
21257 - "81: movl 52(%4), %%edx\n"
21258 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21259 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21260 " movl %%eax, 48(%3)\n"
21261 " movl %%edx, 52(%3)\n"
21262 - "14: movl 56(%4), %%eax\n"
21263 - "91: movl 60(%4), %%edx\n"
21264 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21265 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21266 " movl %%eax, 56(%3)\n"
21267 " movl %%edx, 60(%3)\n"
21268 " addl $-64, %0\n"
21269 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21270 " shrl $2, %0\n"
21271 " andl $3, %%eax\n"
21272 " cld\n"
21273 - "6: rep; movsl\n"
21274 + "6: rep; "__copyuser_seg" movsl\n"
21275 " movl %%eax,%0\n"
21276 - "7: rep; movsb\n"
21277 + "7: rep; "__copyuser_seg" movsb\n"
21278 "8:\n"
21279 ".section .fixup,\"ax\"\n"
21280 "9: lea 0(%%eax,%0,4),%0\n"
21281 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21282
21283 __asm__ __volatile__(
21284 " .align 2,0x90\n"
21285 - "0: movl 32(%4), %%eax\n"
21286 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21287 " cmpl $67, %0\n"
21288 " jbe 2f\n"
21289 - "1: movl 64(%4), %%eax\n"
21290 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21291 " .align 2,0x90\n"
21292 - "2: movl 0(%4), %%eax\n"
21293 - "21: movl 4(%4), %%edx\n"
21294 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21295 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21296 " movnti %%eax, 0(%3)\n"
21297 " movnti %%edx, 4(%3)\n"
21298 - "3: movl 8(%4), %%eax\n"
21299 - "31: movl 12(%4),%%edx\n"
21300 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21301 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21302 " movnti %%eax, 8(%3)\n"
21303 " movnti %%edx, 12(%3)\n"
21304 - "4: movl 16(%4), %%eax\n"
21305 - "41: movl 20(%4), %%edx\n"
21306 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21307 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21308 " movnti %%eax, 16(%3)\n"
21309 " movnti %%edx, 20(%3)\n"
21310 - "10: movl 24(%4), %%eax\n"
21311 - "51: movl 28(%4), %%edx\n"
21312 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21313 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21314 " movnti %%eax, 24(%3)\n"
21315 " movnti %%edx, 28(%3)\n"
21316 - "11: movl 32(%4), %%eax\n"
21317 - "61: movl 36(%4), %%edx\n"
21318 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21319 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21320 " movnti %%eax, 32(%3)\n"
21321 " movnti %%edx, 36(%3)\n"
21322 - "12: movl 40(%4), %%eax\n"
21323 - "71: movl 44(%4), %%edx\n"
21324 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21325 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21326 " movnti %%eax, 40(%3)\n"
21327 " movnti %%edx, 44(%3)\n"
21328 - "13: movl 48(%4), %%eax\n"
21329 - "81: movl 52(%4), %%edx\n"
21330 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21331 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21332 " movnti %%eax, 48(%3)\n"
21333 " movnti %%edx, 52(%3)\n"
21334 - "14: movl 56(%4), %%eax\n"
21335 - "91: movl 60(%4), %%edx\n"
21336 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21337 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21338 " movnti %%eax, 56(%3)\n"
21339 " movnti %%edx, 60(%3)\n"
21340 " addl $-64, %0\n"
21341 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21342 " shrl $2, %0\n"
21343 " andl $3, %%eax\n"
21344 " cld\n"
21345 - "6: rep; movsl\n"
21346 + "6: rep; "__copyuser_seg" movsl\n"
21347 " movl %%eax,%0\n"
21348 - "7: rep; movsb\n"
21349 + "7: rep; "__copyuser_seg" movsb\n"
21350 "8:\n"
21351 ".section .fixup,\"ax\"\n"
21352 "9: lea 0(%%eax,%0,4),%0\n"
21353 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21354
21355 __asm__ __volatile__(
21356 " .align 2,0x90\n"
21357 - "0: movl 32(%4), %%eax\n"
21358 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21359 " cmpl $67, %0\n"
21360 " jbe 2f\n"
21361 - "1: movl 64(%4), %%eax\n"
21362 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21363 " .align 2,0x90\n"
21364 - "2: movl 0(%4), %%eax\n"
21365 - "21: movl 4(%4), %%edx\n"
21366 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21367 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21368 " movnti %%eax, 0(%3)\n"
21369 " movnti %%edx, 4(%3)\n"
21370 - "3: movl 8(%4), %%eax\n"
21371 - "31: movl 12(%4),%%edx\n"
21372 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21373 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21374 " movnti %%eax, 8(%3)\n"
21375 " movnti %%edx, 12(%3)\n"
21376 - "4: movl 16(%4), %%eax\n"
21377 - "41: movl 20(%4), %%edx\n"
21378 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21379 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21380 " movnti %%eax, 16(%3)\n"
21381 " movnti %%edx, 20(%3)\n"
21382 - "10: movl 24(%4), %%eax\n"
21383 - "51: movl 28(%4), %%edx\n"
21384 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21385 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21386 " movnti %%eax, 24(%3)\n"
21387 " movnti %%edx, 28(%3)\n"
21388 - "11: movl 32(%4), %%eax\n"
21389 - "61: movl 36(%4), %%edx\n"
21390 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21391 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21392 " movnti %%eax, 32(%3)\n"
21393 " movnti %%edx, 36(%3)\n"
21394 - "12: movl 40(%4), %%eax\n"
21395 - "71: movl 44(%4), %%edx\n"
21396 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21397 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21398 " movnti %%eax, 40(%3)\n"
21399 " movnti %%edx, 44(%3)\n"
21400 - "13: movl 48(%4), %%eax\n"
21401 - "81: movl 52(%4), %%edx\n"
21402 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21403 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21404 " movnti %%eax, 48(%3)\n"
21405 " movnti %%edx, 52(%3)\n"
21406 - "14: movl 56(%4), %%eax\n"
21407 - "91: movl 60(%4), %%edx\n"
21408 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21409 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21410 " movnti %%eax, 56(%3)\n"
21411 " movnti %%edx, 60(%3)\n"
21412 " addl $-64, %0\n"
21413 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21414 " shrl $2, %0\n"
21415 " andl $3, %%eax\n"
21416 " cld\n"
21417 - "6: rep; movsl\n"
21418 + "6: rep; "__copyuser_seg" movsl\n"
21419 " movl %%eax,%0\n"
21420 - "7: rep; movsb\n"
21421 + "7: rep; "__copyuser_seg" movsb\n"
21422 "8:\n"
21423 ".section .fixup,\"ax\"\n"
21424 "9: lea 0(%%eax,%0,4),%0\n"
21425 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21426 */
21427 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21428 unsigned long size);
21429 -unsigned long __copy_user_intel(void __user *to, const void *from,
21430 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21431 + unsigned long size);
21432 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21433 unsigned long size);
21434 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21435 const void __user *from, unsigned long size);
21436 #endif /* CONFIG_X86_INTEL_USERCOPY */
21437
21438 /* Generic arbitrary sized copy. */
21439 -#define __copy_user(to, from, size) \
21440 +#define __copy_user(to, from, size, prefix, set, restore) \
21441 do { \
21442 int __d0, __d1, __d2; \
21443 __asm__ __volatile__( \
21444 + set \
21445 " cmp $7,%0\n" \
21446 " jbe 1f\n" \
21447 " movl %1,%0\n" \
21448 " negl %0\n" \
21449 " andl $7,%0\n" \
21450 " subl %0,%3\n" \
21451 - "4: rep; movsb\n" \
21452 + "4: rep; "prefix"movsb\n" \
21453 " movl %3,%0\n" \
21454 " shrl $2,%0\n" \
21455 " andl $3,%3\n" \
21456 " .align 2,0x90\n" \
21457 - "0: rep; movsl\n" \
21458 + "0: rep; "prefix"movsl\n" \
21459 " movl %3,%0\n" \
21460 - "1: rep; movsb\n" \
21461 + "1: rep; "prefix"movsb\n" \
21462 "2:\n" \
21463 + restore \
21464 ".section .fixup,\"ax\"\n" \
21465 "5: addl %3,%0\n" \
21466 " jmp 2b\n" \
21467 @@ -682,14 +799,14 @@ do { \
21468 " negl %0\n" \
21469 " andl $7,%0\n" \
21470 " subl %0,%3\n" \
21471 - "4: rep; movsb\n" \
21472 + "4: rep; "__copyuser_seg"movsb\n" \
21473 " movl %3,%0\n" \
21474 " shrl $2,%0\n" \
21475 " andl $3,%3\n" \
21476 " .align 2,0x90\n" \
21477 - "0: rep; movsl\n" \
21478 + "0: rep; "__copyuser_seg"movsl\n" \
21479 " movl %3,%0\n" \
21480 - "1: rep; movsb\n" \
21481 + "1: rep; "__copyuser_seg"movsb\n" \
21482 "2:\n" \
21483 ".section .fixup,\"ax\"\n" \
21484 "5: addl %3,%0\n" \
21485 @@ -775,9 +892,9 @@ survive:
21486 }
21487 #endif
21488 if (movsl_is_ok(to, from, n))
21489 - __copy_user(to, from, n);
21490 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21491 else
21492 - n = __copy_user_intel(to, from, n);
21493 + n = __generic_copy_to_user_intel(to, from, n);
21494 return n;
21495 }
21496 EXPORT_SYMBOL(__copy_to_user_ll);
21497 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21498 unsigned long n)
21499 {
21500 if (movsl_is_ok(to, from, n))
21501 - __copy_user(to, from, n);
21502 + __copy_user(to, from, n, __copyuser_seg, "", "");
21503 else
21504 - n = __copy_user_intel((void __user *)to,
21505 - (const void *)from, n);
21506 + n = __generic_copy_from_user_intel(to, from, n);
21507 return n;
21508 }
21509 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21510 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21511 if (n > 64 && cpu_has_xmm2)
21512 n = __copy_user_intel_nocache(to, from, n);
21513 else
21514 - __copy_user(to, from, n);
21515 + __copy_user(to, from, n, __copyuser_seg, "", "");
21516 #else
21517 - __copy_user(to, from, n);
21518 + __copy_user(to, from, n, __copyuser_seg, "", "");
21519 #endif
21520 return n;
21521 }
21522 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21523
21524 -/**
21525 - * copy_to_user: - Copy a block of data into user space.
21526 - * @to: Destination address, in user space.
21527 - * @from: Source address, in kernel space.
21528 - * @n: Number of bytes to copy.
21529 - *
21530 - * Context: User context only. This function may sleep.
21531 - *
21532 - * Copy data from kernel space to user space.
21533 - *
21534 - * Returns number of bytes that could not be copied.
21535 - * On success, this will be zero.
21536 - */
21537 -unsigned long
21538 -copy_to_user(void __user *to, const void *from, unsigned long n)
21539 -{
21540 - if (access_ok(VERIFY_WRITE, to, n))
21541 - n = __copy_to_user(to, from, n);
21542 - return n;
21543 -}
21544 -EXPORT_SYMBOL(copy_to_user);
21545 -
21546 -/**
21547 - * copy_from_user: - Copy a block of data from user space.
21548 - * @to: Destination address, in kernel space.
21549 - * @from: Source address, in user space.
21550 - * @n: Number of bytes to copy.
21551 - *
21552 - * Context: User context only. This function may sleep.
21553 - *
21554 - * Copy data from user space to kernel space.
21555 - *
21556 - * Returns number of bytes that could not be copied.
21557 - * On success, this will be zero.
21558 - *
21559 - * If some data could not be copied, this function will pad the copied
21560 - * data to the requested size using zero bytes.
21561 - */
21562 -unsigned long
21563 -_copy_from_user(void *to, const void __user *from, unsigned long n)
21564 -{
21565 - if (access_ok(VERIFY_READ, from, n))
21566 - n = __copy_from_user(to, from, n);
21567 - else
21568 - memset(to, 0, n);
21569 - return n;
21570 -}
21571 -EXPORT_SYMBOL(_copy_from_user);
21572 -
21573 void copy_from_user_overflow(void)
21574 {
21575 WARN(1, "Buffer overflow detected!\n");
21576 }
21577 EXPORT_SYMBOL(copy_from_user_overflow);
21578 +
21579 +void copy_to_user_overflow(void)
21580 +{
21581 + WARN(1, "Buffer overflow detected!\n");
21582 +}
21583 +EXPORT_SYMBOL(copy_to_user_overflow);
21584 +
21585 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21586 +void __set_fs(mm_segment_t x)
21587 +{
21588 + switch (x.seg) {
21589 + case 0:
21590 + loadsegment(gs, 0);
21591 + break;
21592 + case TASK_SIZE_MAX:
21593 + loadsegment(gs, __USER_DS);
21594 + break;
21595 + case -1UL:
21596 + loadsegment(gs, __KERNEL_DS);
21597 + break;
21598 + default:
21599 + BUG();
21600 + }
21601 + return;
21602 +}
21603 +EXPORT_SYMBOL(__set_fs);
21604 +
21605 +void set_fs(mm_segment_t x)
21606 +{
21607 + current_thread_info()->addr_limit = x;
21608 + __set_fs(x);
21609 +}
21610 +EXPORT_SYMBOL(set_fs);
21611 +#endif
21612 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21613 index b7c2849..8633ad8 100644
21614 --- a/arch/x86/lib/usercopy_64.c
21615 +++ b/arch/x86/lib/usercopy_64.c
21616 @@ -42,6 +42,12 @@ long
21617 __strncpy_from_user(char *dst, const char __user *src, long count)
21618 {
21619 long res;
21620 +
21621 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21622 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21623 + src += PAX_USER_SHADOW_BASE;
21624 +#endif
21625 +
21626 __do_strncpy_from_user(dst, src, count, res);
21627 return res;
21628 }
21629 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21630 {
21631 long __d0;
21632 might_fault();
21633 +
21634 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21635 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21636 + addr += PAX_USER_SHADOW_BASE;
21637 +#endif
21638 +
21639 /* no memory constraint because it doesn't change any memory gcc knows
21640 about */
21641 asm volatile(
21642 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21643 }
21644 EXPORT_SYMBOL(strlen_user);
21645
21646 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21647 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21648 {
21649 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21650 - return copy_user_generic((__force void *)to, (__force void *)from, len);
21651 - }
21652 - return len;
21653 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21654 +
21655 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21656 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21657 + to += PAX_USER_SHADOW_BASE;
21658 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21659 + from += PAX_USER_SHADOW_BASE;
21660 +#endif
21661 +
21662 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21663 + }
21664 + return len;
21665 }
21666 EXPORT_SYMBOL(copy_in_user);
21667
21668 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21669 * it is not necessary to optimize tail handling.
21670 */
21671 unsigned long
21672 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21673 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21674 {
21675 char c;
21676 unsigned zero_len;
21677 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21678 index d0474ad..36e9257 100644
21679 --- a/arch/x86/mm/extable.c
21680 +++ b/arch/x86/mm/extable.c
21681 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21682 const struct exception_table_entry *fixup;
21683
21684 #ifdef CONFIG_PNPBIOS
21685 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21686 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21687 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21688 extern u32 pnp_bios_is_utter_crap;
21689 pnp_bios_is_utter_crap = 1;
21690 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21691 index 5db0490..2ddce45 100644
21692 --- a/arch/x86/mm/fault.c
21693 +++ b/arch/x86/mm/fault.c
21694 @@ -13,11 +13,18 @@
21695 #include <linux/perf_event.h> /* perf_sw_event */
21696 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21697 #include <linux/prefetch.h> /* prefetchw */
21698 +#include <linux/unistd.h>
21699 +#include <linux/compiler.h>
21700
21701 #include <asm/traps.h> /* dotraplinkage, ... */
21702 #include <asm/pgalloc.h> /* pgd_*(), ... */
21703 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21704 #include <asm/fixmap.h> /* VSYSCALL_START */
21705 +#include <asm/tlbflush.h>
21706 +
21707 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21708 +#include <asm/stacktrace.h>
21709 +#endif
21710
21711 /*
21712 * Page fault error code bits:
21713 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21714 int ret = 0;
21715
21716 /* kprobe_running() needs smp_processor_id() */
21717 - if (kprobes_built_in() && !user_mode_vm(regs)) {
21718 + if (kprobes_built_in() && !user_mode(regs)) {
21719 preempt_disable();
21720 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21721 ret = 1;
21722 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21723 return !instr_lo || (instr_lo>>1) == 1;
21724 case 0x00:
21725 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21726 - if (probe_kernel_address(instr, opcode))
21727 + if (user_mode(regs)) {
21728 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21729 + return 0;
21730 + } else if (probe_kernel_address(instr, opcode))
21731 return 0;
21732
21733 *prefetch = (instr_lo == 0xF) &&
21734 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21735 while (instr < max_instr) {
21736 unsigned char opcode;
21737
21738 - if (probe_kernel_address(instr, opcode))
21739 + if (user_mode(regs)) {
21740 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21741 + break;
21742 + } else if (probe_kernel_address(instr, opcode))
21743 break;
21744
21745 instr++;
21746 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21747 force_sig_info(si_signo, &info, tsk);
21748 }
21749
21750 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21751 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21752 +#endif
21753 +
21754 +#ifdef CONFIG_PAX_EMUTRAMP
21755 +static int pax_handle_fetch_fault(struct pt_regs *regs);
21756 +#endif
21757 +
21758 +#ifdef CONFIG_PAX_PAGEEXEC
21759 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21760 +{
21761 + pgd_t *pgd;
21762 + pud_t *pud;
21763 + pmd_t *pmd;
21764 +
21765 + pgd = pgd_offset(mm, address);
21766 + if (!pgd_present(*pgd))
21767 + return NULL;
21768 + pud = pud_offset(pgd, address);
21769 + if (!pud_present(*pud))
21770 + return NULL;
21771 + pmd = pmd_offset(pud, address);
21772 + if (!pmd_present(*pmd))
21773 + return NULL;
21774 + return pmd;
21775 +}
21776 +#endif
21777 +
21778 DEFINE_SPINLOCK(pgd_lock);
21779 LIST_HEAD(pgd_list);
21780
21781 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21782 for (address = VMALLOC_START & PMD_MASK;
21783 address >= TASK_SIZE && address < FIXADDR_TOP;
21784 address += PMD_SIZE) {
21785 +
21786 +#ifdef CONFIG_PAX_PER_CPU_PGD
21787 + unsigned long cpu;
21788 +#else
21789 struct page *page;
21790 +#endif
21791
21792 spin_lock(&pgd_lock);
21793 +
21794 +#ifdef CONFIG_PAX_PER_CPU_PGD
21795 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
21796 + pgd_t *pgd = get_cpu_pgd(cpu);
21797 + pmd_t *ret;
21798 +#else
21799 list_for_each_entry(page, &pgd_list, lru) {
21800 + pgd_t *pgd = page_address(page);
21801 spinlock_t *pgt_lock;
21802 pmd_t *ret;
21803
21804 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21805 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21806
21807 spin_lock(pgt_lock);
21808 - ret = vmalloc_sync_one(page_address(page), address);
21809 +#endif
21810 +
21811 + ret = vmalloc_sync_one(pgd, address);
21812 +
21813 +#ifndef CONFIG_PAX_PER_CPU_PGD
21814 spin_unlock(pgt_lock);
21815 +#endif
21816
21817 if (!ret)
21818 break;
21819 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21820 * an interrupt in the middle of a task switch..
21821 */
21822 pgd_paddr = read_cr3();
21823 +
21824 +#ifdef CONFIG_PAX_PER_CPU_PGD
21825 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21826 +#endif
21827 +
21828 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21829 if (!pmd_k)
21830 return -1;
21831 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21832 * happen within a race in page table update. In the later
21833 * case just flush:
21834 */
21835 +
21836 +#ifdef CONFIG_PAX_PER_CPU_PGD
21837 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21838 + pgd = pgd_offset_cpu(smp_processor_id(), address);
21839 +#else
21840 pgd = pgd_offset(current->active_mm, address);
21841 +#endif
21842 +
21843 pgd_ref = pgd_offset_k(address);
21844 if (pgd_none(*pgd_ref))
21845 return -1;
21846 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21847 static int is_errata100(struct pt_regs *regs, unsigned long address)
21848 {
21849 #ifdef CONFIG_X86_64
21850 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21851 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21852 return 1;
21853 #endif
21854 return 0;
21855 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21856 }
21857
21858 static const char nx_warning[] = KERN_CRIT
21859 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21860 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21861
21862 static void
21863 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21864 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21865 if (!oops_may_print())
21866 return;
21867
21868 - if (error_code & PF_INSTR) {
21869 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21870 unsigned int level;
21871
21872 pte_t *pte = lookup_address(address, &level);
21873
21874 if (pte && pte_present(*pte) && !pte_exec(*pte))
21875 - printk(nx_warning, current_uid());
21876 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21877 }
21878
21879 +#ifdef CONFIG_PAX_KERNEXEC
21880 + if (init_mm.start_code <= address && address < init_mm.end_code) {
21881 + if (current->signal->curr_ip)
21882 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21883 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21884 + else
21885 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21886 + current->comm, task_pid_nr(current), current_uid(), current_euid());
21887 + }
21888 +#endif
21889 +
21890 printk(KERN_ALERT "BUG: unable to handle kernel ");
21891 if (address < PAGE_SIZE)
21892 printk(KERN_CONT "NULL pointer dereference");
21893 @@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21894 }
21895 #endif
21896
21897 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21898 + if (pax_is_fetch_fault(regs, error_code, address)) {
21899 +
21900 +#ifdef CONFIG_PAX_EMUTRAMP
21901 + switch (pax_handle_fetch_fault(regs)) {
21902 + case 2:
21903 + return;
21904 + }
21905 +#endif
21906 +
21907 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21908 + do_group_exit(SIGKILL);
21909 + }
21910 +#endif
21911 +
21912 if (unlikely(show_unhandled_signals))
21913 show_signal_msg(regs, error_code, address, tsk);
21914
21915 @@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21916 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21917 printk(KERN_ERR
21918 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21919 - tsk->comm, tsk->pid, address);
21920 + tsk->comm, task_pid_nr(tsk), address);
21921 code = BUS_MCEERR_AR;
21922 }
21923 #endif
21924 @@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21925 return 1;
21926 }
21927
21928 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21929 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21930 +{
21931 + pte_t *pte;
21932 + pmd_t *pmd;
21933 + spinlock_t *ptl;
21934 + unsigned char pte_mask;
21935 +
21936 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21937 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
21938 + return 0;
21939 +
21940 + /* PaX: it's our fault, let's handle it if we can */
21941 +
21942 + /* PaX: take a look at read faults before acquiring any locks */
21943 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21944 + /* instruction fetch attempt from a protected page in user mode */
21945 + up_read(&mm->mmap_sem);
21946 +
21947 +#ifdef CONFIG_PAX_EMUTRAMP
21948 + switch (pax_handle_fetch_fault(regs)) {
21949 + case 2:
21950 + return 1;
21951 + }
21952 +#endif
21953 +
21954 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21955 + do_group_exit(SIGKILL);
21956 + }
21957 +
21958 + pmd = pax_get_pmd(mm, address);
21959 + if (unlikely(!pmd))
21960 + return 0;
21961 +
21962 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21963 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21964 + pte_unmap_unlock(pte, ptl);
21965 + return 0;
21966 + }
21967 +
21968 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21969 + /* write attempt to a protected page in user mode */
21970 + pte_unmap_unlock(pte, ptl);
21971 + return 0;
21972 + }
21973 +
21974 +#ifdef CONFIG_SMP
21975 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21976 +#else
21977 + if (likely(address > get_limit(regs->cs)))
21978 +#endif
21979 + {
21980 + set_pte(pte, pte_mkread(*pte));
21981 + __flush_tlb_one(address);
21982 + pte_unmap_unlock(pte, ptl);
21983 + up_read(&mm->mmap_sem);
21984 + return 1;
21985 + }
21986 +
21987 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21988 +
21989 + /*
21990 + * PaX: fill DTLB with user rights and retry
21991 + */
21992 + __asm__ __volatile__ (
21993 + "orb %2,(%1)\n"
21994 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21995 +/*
21996 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21997 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21998 + * page fault when examined during a TLB load attempt. this is true not only
21999 + * for PTEs holding a non-present entry but also present entries that will
22000 + * raise a page fault (such as those set up by PaX, or the copy-on-write
22001 + * mechanism). in effect it means that we do *not* need to flush the TLBs
22002 + * for our target pages since their PTEs are simply not in the TLBs at all.
22003 +
22004 + * the best thing in omitting it is that we gain around 15-20% speed in the
22005 + * fast path of the page fault handler and can get rid of tracing since we
22006 + * can no longer flush unintended entries.
22007 + */
22008 + "invlpg (%0)\n"
22009 +#endif
22010 + __copyuser_seg"testb $0,(%0)\n"
22011 + "xorb %3,(%1)\n"
22012 + :
22013 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
22014 + : "memory", "cc");
22015 + pte_unmap_unlock(pte, ptl);
22016 + up_read(&mm->mmap_sem);
22017 + return 1;
22018 +}
22019 +#endif
22020 +
22021 /*
22022 * Handle a spurious fault caused by a stale TLB entry.
22023 *
22024 @@ -962,6 +1151,9 @@ int show_unhandled_signals = 1;
22025 static inline int
22026 access_error(unsigned long error_code, struct vm_area_struct *vma)
22027 {
22028 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
22029 + return 1;
22030 +
22031 if (error_code & PF_WRITE) {
22032 /* write, present and write, not present: */
22033 if (unlikely(!(vma->vm_flags & VM_WRITE)))
22034 @@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
22035 {
22036 struct vm_area_struct *vma;
22037 struct task_struct *tsk;
22038 - unsigned long address;
22039 struct mm_struct *mm;
22040 int fault;
22041 int write = error_code & PF_WRITE;
22042 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
22043 (write ? FAULT_FLAG_WRITE : 0);
22044
22045 - tsk = current;
22046 - mm = tsk->mm;
22047 -
22048 /* Get the faulting address: */
22049 - address = read_cr2();
22050 + unsigned long address = read_cr2();
22051 +
22052 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22053 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
22054 + if (!search_exception_tables(regs->ip)) {
22055 + bad_area_nosemaphore(regs, error_code, address);
22056 + return;
22057 + }
22058 + if (address < PAX_USER_SHADOW_BASE) {
22059 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
22060 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
22061 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
22062 + } else
22063 + address -= PAX_USER_SHADOW_BASE;
22064 + }
22065 +#endif
22066 +
22067 + tsk = current;
22068 + mm = tsk->mm;
22069
22070 /*
22071 * Detect and handle instructions that would cause a page fault for
22072 @@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
22073 * User-mode registers count as a user access even for any
22074 * potential system fault or CPU buglet:
22075 */
22076 - if (user_mode_vm(regs)) {
22077 + if (user_mode(regs)) {
22078 local_irq_enable();
22079 error_code |= PF_USER;
22080 } else {
22081 @@ -1122,6 +1328,11 @@ retry:
22082 might_sleep();
22083 }
22084
22085 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
22086 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
22087 + return;
22088 +#endif
22089 +
22090 vma = find_vma(mm, address);
22091 if (unlikely(!vma)) {
22092 bad_area(regs, error_code, address);
22093 @@ -1133,18 +1344,24 @@ retry:
22094 bad_area(regs, error_code, address);
22095 return;
22096 }
22097 - if (error_code & PF_USER) {
22098 - /*
22099 - * Accessing the stack below %sp is always a bug.
22100 - * The large cushion allows instructions like enter
22101 - * and pusha to work. ("enter $65535, $31" pushes
22102 - * 32 pointers and then decrements %sp by 65535.)
22103 - */
22104 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
22105 - bad_area(regs, error_code, address);
22106 - return;
22107 - }
22108 + /*
22109 + * Accessing the stack below %sp is always a bug.
22110 + * The large cushion allows instructions like enter
22111 + * and pusha to work. ("enter $65535, $31" pushes
22112 + * 32 pointers and then decrements %sp by 65535.)
22113 + */
22114 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
22115 + bad_area(regs, error_code, address);
22116 + return;
22117 }
22118 +
22119 +#ifdef CONFIG_PAX_SEGMEXEC
22120 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
22121 + bad_area(regs, error_code, address);
22122 + return;
22123 + }
22124 +#endif
22125 +
22126 if (unlikely(expand_stack(vma, address))) {
22127 bad_area(regs, error_code, address);
22128 return;
22129 @@ -1199,3 +1416,292 @@ good_area:
22130
22131 up_read(&mm->mmap_sem);
22132 }
22133 +
22134 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22135 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
22136 +{
22137 + struct mm_struct *mm = current->mm;
22138 + unsigned long ip = regs->ip;
22139 +
22140 + if (v8086_mode(regs))
22141 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
22142 +
22143 +#ifdef CONFIG_PAX_PAGEEXEC
22144 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
22145 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
22146 + return true;
22147 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
22148 + return true;
22149 + return false;
22150 + }
22151 +#endif
22152 +
22153 +#ifdef CONFIG_PAX_SEGMEXEC
22154 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
22155 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
22156 + return true;
22157 + return false;
22158 + }
22159 +#endif
22160 +
22161 + return false;
22162 +}
22163 +#endif
22164 +
22165 +#ifdef CONFIG_PAX_EMUTRAMP
22166 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
22167 +{
22168 + int err;
22169 +
22170 + do { /* PaX: libffi trampoline emulation */
22171 + unsigned char mov, jmp;
22172 + unsigned int addr1, addr2;
22173 +
22174 +#ifdef CONFIG_X86_64
22175 + if ((regs->ip + 9) >> 32)
22176 + break;
22177 +#endif
22178 +
22179 + err = get_user(mov, (unsigned char __user *)regs->ip);
22180 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22181 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
22182 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22183 +
22184 + if (err)
22185 + break;
22186 +
22187 + if (mov == 0xB8 && jmp == 0xE9) {
22188 + regs->ax = addr1;
22189 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
22190 + return 2;
22191 + }
22192 + } while (0);
22193 +
22194 + do { /* PaX: gcc trampoline emulation #1 */
22195 + unsigned char mov1, mov2;
22196 + unsigned short jmp;
22197 + unsigned int addr1, addr2;
22198 +
22199 +#ifdef CONFIG_X86_64
22200 + if ((regs->ip + 11) >> 32)
22201 + break;
22202 +#endif
22203 +
22204 + err = get_user(mov1, (unsigned char __user *)regs->ip);
22205 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22206 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
22207 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22208 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
22209 +
22210 + if (err)
22211 + break;
22212 +
22213 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
22214 + regs->cx = addr1;
22215 + regs->ax = addr2;
22216 + regs->ip = addr2;
22217 + return 2;
22218 + }
22219 + } while (0);
22220 +
22221 + do { /* PaX: gcc trampoline emulation #2 */
22222 + unsigned char mov, jmp;
22223 + unsigned int addr1, addr2;
22224 +
22225 +#ifdef CONFIG_X86_64
22226 + if ((regs->ip + 9) >> 32)
22227 + break;
22228 +#endif
22229 +
22230 + err = get_user(mov, (unsigned char __user *)regs->ip);
22231 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22232 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
22233 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22234 +
22235 + if (err)
22236 + break;
22237 +
22238 + if (mov == 0xB9 && jmp == 0xE9) {
22239 + regs->cx = addr1;
22240 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
22241 + return 2;
22242 + }
22243 + } while (0);
22244 +
22245 + return 1; /* PaX in action */
22246 +}
22247 +
22248 +#ifdef CONFIG_X86_64
22249 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
22250 +{
22251 + int err;
22252 +
22253 + do { /* PaX: libffi trampoline emulation */
22254 + unsigned short mov1, mov2, jmp1;
22255 + unsigned char stcclc, jmp2;
22256 + unsigned long addr1, addr2;
22257 +
22258 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22259 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22260 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22261 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22262 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
22263 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
22264 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
22265 +
22266 + if (err)
22267 + break;
22268 +
22269 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22270 + regs->r11 = addr1;
22271 + regs->r10 = addr2;
22272 + if (stcclc == 0xF8)
22273 + regs->flags &= ~X86_EFLAGS_CF;
22274 + else
22275 + regs->flags |= X86_EFLAGS_CF;
22276 + regs->ip = addr1;
22277 + return 2;
22278 + }
22279 + } while (0);
22280 +
22281 + do { /* PaX: gcc trampoline emulation #1 */
22282 + unsigned short mov1, mov2, jmp1;
22283 + unsigned char jmp2;
22284 + unsigned int addr1;
22285 + unsigned long addr2;
22286 +
22287 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22288 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
22289 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
22290 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
22291 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
22292 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
22293 +
22294 + if (err)
22295 + break;
22296 +
22297 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22298 + regs->r11 = addr1;
22299 + regs->r10 = addr2;
22300 + regs->ip = addr1;
22301 + return 2;
22302 + }
22303 + } while (0);
22304 +
22305 + do { /* PaX: gcc trampoline emulation #2 */
22306 + unsigned short mov1, mov2, jmp1;
22307 + unsigned char jmp2;
22308 + unsigned long addr1, addr2;
22309 +
22310 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22311 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22312 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22313 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22314 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
22315 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
22316 +
22317 + if (err)
22318 + break;
22319 +
22320 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22321 + regs->r11 = addr1;
22322 + regs->r10 = addr2;
22323 + regs->ip = addr1;
22324 + return 2;
22325 + }
22326 + } while (0);
22327 +
22328 + return 1; /* PaX in action */
22329 +}
22330 +#endif
22331 +
22332 +/*
22333 + * PaX: decide what to do with offenders (regs->ip = fault address)
22334 + *
22335 + * returns 1 when task should be killed
22336 + * 2 when gcc trampoline was detected
22337 + */
22338 +static int pax_handle_fetch_fault(struct pt_regs *regs)
22339 +{
22340 + if (v8086_mode(regs))
22341 + return 1;
22342 +
22343 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
22344 + return 1;
22345 +
22346 +#ifdef CONFIG_X86_32
22347 + return pax_handle_fetch_fault_32(regs);
22348 +#else
22349 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22350 + return pax_handle_fetch_fault_32(regs);
22351 + else
22352 + return pax_handle_fetch_fault_64(regs);
22353 +#endif
22354 +}
22355 +#endif
22356 +
22357 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22358 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
22359 +{
22360 + long i;
22361 +
22362 + printk(KERN_ERR "PAX: bytes at PC: ");
22363 + for (i = 0; i < 20; i++) {
22364 + unsigned char c;
22365 + if (get_user(c, (unsigned char __force_user *)pc+i))
22366 + printk(KERN_CONT "?? ");
22367 + else
22368 + printk(KERN_CONT "%02x ", c);
22369 + }
22370 + printk("\n");
22371 +
22372 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22373 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
22374 + unsigned long c;
22375 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
22376 +#ifdef CONFIG_X86_32
22377 + printk(KERN_CONT "???????? ");
22378 +#else
22379 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22380 + printk(KERN_CONT "???????? ???????? ");
22381 + else
22382 + printk(KERN_CONT "???????????????? ");
22383 +#endif
22384 + } else {
22385 +#ifdef CONFIG_X86_64
22386 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22387 + printk(KERN_CONT "%08x ", (unsigned int)c);
22388 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22389 + } else
22390 +#endif
22391 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22392 + }
22393 + }
22394 + printk("\n");
22395 +}
22396 +#endif
22397 +
22398 +/**
22399 + * probe_kernel_write(): safely attempt to write to a location
22400 + * @dst: address to write to
22401 + * @src: pointer to the data that shall be written
22402 + * @size: size of the data chunk
22403 + *
22404 + * Safely write to address @dst from the buffer at @src. If a kernel fault
22405 + * happens, handle that and return -EFAULT.
22406 + */
22407 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22408 +{
22409 + long ret;
22410 + mm_segment_t old_fs = get_fs();
22411 +
22412 + set_fs(KERNEL_DS);
22413 + pagefault_disable();
22414 + pax_open_kernel();
22415 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22416 + pax_close_kernel();
22417 + pagefault_enable();
22418 + set_fs(old_fs);
22419 +
22420 + return ret ? -EFAULT : 0;
22421 +}
22422 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22423 index dd74e46..7d26398 100644
22424 --- a/arch/x86/mm/gup.c
22425 +++ b/arch/x86/mm/gup.c
22426 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22427 addr = start;
22428 len = (unsigned long) nr_pages << PAGE_SHIFT;
22429 end = start + len;
22430 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22431 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22432 (void __user *)start, len)))
22433 return 0;
22434
22435 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22436 index f4f29b1..5cac4fb 100644
22437 --- a/arch/x86/mm/highmem_32.c
22438 +++ b/arch/x86/mm/highmem_32.c
22439 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22440 idx = type + KM_TYPE_NR*smp_processor_id();
22441 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22442 BUG_ON(!pte_none(*(kmap_pte-idx)));
22443 +
22444 + pax_open_kernel();
22445 set_pte(kmap_pte-idx, mk_pte(page, prot));
22446 + pax_close_kernel();
22447 +
22448 arch_flush_lazy_mmu_mode();
22449
22450 return (void *)vaddr;
22451 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22452 index f581a18..29efd37 100644
22453 --- a/arch/x86/mm/hugetlbpage.c
22454 +++ b/arch/x86/mm/hugetlbpage.c
22455 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22456 struct hstate *h = hstate_file(file);
22457 struct mm_struct *mm = current->mm;
22458 struct vm_area_struct *vma;
22459 - unsigned long start_addr;
22460 + unsigned long start_addr, pax_task_size = TASK_SIZE;
22461 +
22462 +#ifdef CONFIG_PAX_SEGMEXEC
22463 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22464 + pax_task_size = SEGMEXEC_TASK_SIZE;
22465 +#endif
22466 +
22467 + pax_task_size -= PAGE_SIZE;
22468
22469 if (len > mm->cached_hole_size) {
22470 - start_addr = mm->free_area_cache;
22471 + start_addr = mm->free_area_cache;
22472 } else {
22473 - start_addr = TASK_UNMAPPED_BASE;
22474 - mm->cached_hole_size = 0;
22475 + start_addr = mm->mmap_base;
22476 + mm->cached_hole_size = 0;
22477 }
22478
22479 full_search:
22480 @@ -280,26 +287,27 @@ full_search:
22481
22482 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22483 /* At this point: (!vma || addr < vma->vm_end). */
22484 - if (TASK_SIZE - len < addr) {
22485 + if (pax_task_size - len < addr) {
22486 /*
22487 * Start a new search - just in case we missed
22488 * some holes.
22489 */
22490 - if (start_addr != TASK_UNMAPPED_BASE) {
22491 - start_addr = TASK_UNMAPPED_BASE;
22492 + if (start_addr != mm->mmap_base) {
22493 + start_addr = mm->mmap_base;
22494 mm->cached_hole_size = 0;
22495 goto full_search;
22496 }
22497 return -ENOMEM;
22498 }
22499 - if (!vma || addr + len <= vma->vm_start) {
22500 - mm->free_area_cache = addr + len;
22501 - return addr;
22502 - }
22503 + if (check_heap_stack_gap(vma, addr, len))
22504 + break;
22505 if (addr + mm->cached_hole_size < vma->vm_start)
22506 mm->cached_hole_size = vma->vm_start - addr;
22507 addr = ALIGN(vma->vm_end, huge_page_size(h));
22508 }
22509 +
22510 + mm->free_area_cache = addr + len;
22511 + return addr;
22512 }
22513
22514 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22515 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22516 {
22517 struct hstate *h = hstate_file(file);
22518 struct mm_struct *mm = current->mm;
22519 - struct vm_area_struct *vma, *prev_vma;
22520 - unsigned long base = mm->mmap_base, addr = addr0;
22521 + struct vm_area_struct *vma;
22522 + unsigned long base = mm->mmap_base, addr;
22523 unsigned long largest_hole = mm->cached_hole_size;
22524 - int first_time = 1;
22525
22526 /* don't allow allocations above current base */
22527 if (mm->free_area_cache > base)
22528 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22529 largest_hole = 0;
22530 mm->free_area_cache = base;
22531 }
22532 -try_again:
22533 +
22534 /* make sure it can fit in the remaining address space */
22535 if (mm->free_area_cache < len)
22536 goto fail;
22537
22538 /* either no address requested or can't fit in requested address hole */
22539 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
22540 + addr = (mm->free_area_cache - len);
22541 do {
22542 + addr &= huge_page_mask(h);
22543 + vma = find_vma(mm, addr);
22544 /*
22545 * Lookup failure means no vma is above this address,
22546 * i.e. return with success:
22547 - */
22548 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22549 - return addr;
22550 -
22551 - /*
22552 * new region fits between prev_vma->vm_end and
22553 * vma->vm_start, use it:
22554 */
22555 - if (addr + len <= vma->vm_start &&
22556 - (!prev_vma || (addr >= prev_vma->vm_end))) {
22557 + if (check_heap_stack_gap(vma, addr, len)) {
22558 /* remember the address as a hint for next time */
22559 - mm->cached_hole_size = largest_hole;
22560 - return (mm->free_area_cache = addr);
22561 - } else {
22562 - /* pull free_area_cache down to the first hole */
22563 - if (mm->free_area_cache == vma->vm_end) {
22564 - mm->free_area_cache = vma->vm_start;
22565 - mm->cached_hole_size = largest_hole;
22566 - }
22567 + mm->cached_hole_size = largest_hole;
22568 + return (mm->free_area_cache = addr);
22569 + }
22570 + /* pull free_area_cache down to the first hole */
22571 + if (mm->free_area_cache == vma->vm_end) {
22572 + mm->free_area_cache = vma->vm_start;
22573 + mm->cached_hole_size = largest_hole;
22574 }
22575
22576 /* remember the largest hole we saw so far */
22577 if (addr + largest_hole < vma->vm_start)
22578 - largest_hole = vma->vm_start - addr;
22579 + largest_hole = vma->vm_start - addr;
22580
22581 /* try just below the current vma->vm_start */
22582 - addr = (vma->vm_start - len) & huge_page_mask(h);
22583 - } while (len <= vma->vm_start);
22584 + addr = skip_heap_stack_gap(vma, len);
22585 + } while (!IS_ERR_VALUE(addr));
22586
22587 fail:
22588 /*
22589 - * if hint left us with no space for the requested
22590 - * mapping then try again:
22591 - */
22592 - if (first_time) {
22593 - mm->free_area_cache = base;
22594 - largest_hole = 0;
22595 - first_time = 0;
22596 - goto try_again;
22597 - }
22598 - /*
22599 * A failed mmap() very likely causes application failure,
22600 * so fall back to the bottom-up function here. This scenario
22601 * can happen with large stack limits and large mmap()
22602 * allocations.
22603 */
22604 - mm->free_area_cache = TASK_UNMAPPED_BASE;
22605 +
22606 +#ifdef CONFIG_PAX_SEGMEXEC
22607 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22608 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22609 + else
22610 +#endif
22611 +
22612 + mm->mmap_base = TASK_UNMAPPED_BASE;
22613 +
22614 +#ifdef CONFIG_PAX_RANDMMAP
22615 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22616 + mm->mmap_base += mm->delta_mmap;
22617 +#endif
22618 +
22619 + mm->free_area_cache = mm->mmap_base;
22620 mm->cached_hole_size = ~0UL;
22621 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22622 len, pgoff, flags);
22623 @@ -386,6 +392,7 @@ fail:
22624 /*
22625 * Restore the topdown base:
22626 */
22627 + mm->mmap_base = base;
22628 mm->free_area_cache = base;
22629 mm->cached_hole_size = ~0UL;
22630
22631 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22632 struct hstate *h = hstate_file(file);
22633 struct mm_struct *mm = current->mm;
22634 struct vm_area_struct *vma;
22635 + unsigned long pax_task_size = TASK_SIZE;
22636
22637 if (len & ~huge_page_mask(h))
22638 return -EINVAL;
22639 - if (len > TASK_SIZE)
22640 +
22641 +#ifdef CONFIG_PAX_SEGMEXEC
22642 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22643 + pax_task_size = SEGMEXEC_TASK_SIZE;
22644 +#endif
22645 +
22646 + pax_task_size -= PAGE_SIZE;
22647 +
22648 + if (len > pax_task_size)
22649 return -ENOMEM;
22650
22651 if (flags & MAP_FIXED) {
22652 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22653 if (addr) {
22654 addr = ALIGN(addr, huge_page_size(h));
22655 vma = find_vma(mm, addr);
22656 - if (TASK_SIZE - len >= addr &&
22657 - (!vma || addr + len <= vma->vm_start))
22658 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22659 return addr;
22660 }
22661 if (mm->get_unmapped_area == arch_get_unmapped_area)
22662 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22663 index 87488b9..399f416 100644
22664 --- a/arch/x86/mm/init.c
22665 +++ b/arch/x86/mm/init.c
22666 @@ -15,6 +15,7 @@
22667 #include <asm/tlbflush.h>
22668 #include <asm/tlb.h>
22669 #include <asm/proto.h>
22670 +#include <asm/desc.h>
22671
22672 unsigned long __initdata pgt_buf_start;
22673 unsigned long __meminitdata pgt_buf_end;
22674 @@ -31,7 +32,7 @@ int direct_gbpages
22675 static void __init find_early_table_space(unsigned long end, int use_pse,
22676 int use_gbpages)
22677 {
22678 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22679 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22680 phys_addr_t base;
22681
22682 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22683 @@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22684 */
22685 int devmem_is_allowed(unsigned long pagenr)
22686 {
22687 +#ifdef CONFIG_GRKERNSEC_KMEM
22688 + /* allow BDA */
22689 + if (!pagenr)
22690 + return 1;
22691 + /* allow EBDA */
22692 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22693 + return 1;
22694 +#else
22695 + if (!pagenr)
22696 + return 1;
22697 +#ifdef CONFIG_VM86
22698 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22699 + return 1;
22700 +#endif
22701 +#endif
22702 +
22703 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22704 + return 1;
22705 +#ifdef CONFIG_GRKERNSEC_KMEM
22706 + /* throw out everything else below 1MB */
22707 if (pagenr <= 256)
22708 - return 1;
22709 + return 0;
22710 +#endif
22711 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22712 return 0;
22713 if (!page_is_ram(pagenr))
22714 @@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22715
22716 void free_initmem(void)
22717 {
22718 +
22719 +#ifdef CONFIG_PAX_KERNEXEC
22720 +#ifdef CONFIG_X86_32
22721 + /* PaX: limit KERNEL_CS to actual size */
22722 + unsigned long addr, limit;
22723 + struct desc_struct d;
22724 + int cpu;
22725 +
22726 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22727 + limit = (limit - 1UL) >> PAGE_SHIFT;
22728 +
22729 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22730 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
22731 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22732 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22733 + }
22734 +
22735 + /* PaX: make KERNEL_CS read-only */
22736 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22737 + if (!paravirt_enabled())
22738 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22739 +/*
22740 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22741 + pgd = pgd_offset_k(addr);
22742 + pud = pud_offset(pgd, addr);
22743 + pmd = pmd_offset(pud, addr);
22744 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22745 + }
22746 +*/
22747 +#ifdef CONFIG_X86_PAE
22748 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22749 +/*
22750 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22751 + pgd = pgd_offset_k(addr);
22752 + pud = pud_offset(pgd, addr);
22753 + pmd = pmd_offset(pud, addr);
22754 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22755 + }
22756 +*/
22757 +#endif
22758 +
22759 +#ifdef CONFIG_MODULES
22760 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22761 +#endif
22762 +
22763 +#else
22764 + pgd_t *pgd;
22765 + pud_t *pud;
22766 + pmd_t *pmd;
22767 + unsigned long addr, end;
22768 +
22769 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22770 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22771 + pgd = pgd_offset_k(addr);
22772 + pud = pud_offset(pgd, addr);
22773 + pmd = pmd_offset(pud, addr);
22774 + if (!pmd_present(*pmd))
22775 + continue;
22776 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22777 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22778 + else
22779 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22780 + }
22781 +
22782 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22783 + end = addr + KERNEL_IMAGE_SIZE;
22784 + for (; addr < end; addr += PMD_SIZE) {
22785 + pgd = pgd_offset_k(addr);
22786 + pud = pud_offset(pgd, addr);
22787 + pmd = pmd_offset(pud, addr);
22788 + if (!pmd_present(*pmd))
22789 + continue;
22790 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22791 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22792 + }
22793 +#endif
22794 +
22795 + flush_tlb_all();
22796 +#endif
22797 +
22798 free_init_pages("unused kernel memory",
22799 (unsigned long)(&__init_begin),
22800 (unsigned long)(&__init_end));
22801 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22802 index 29f7c6d..b46b35b 100644
22803 --- a/arch/x86/mm/init_32.c
22804 +++ b/arch/x86/mm/init_32.c
22805 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22806 }
22807
22808 /*
22809 - * Creates a middle page table and puts a pointer to it in the
22810 - * given global directory entry. This only returns the gd entry
22811 - * in non-PAE compilation mode, since the middle layer is folded.
22812 - */
22813 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
22814 -{
22815 - pud_t *pud;
22816 - pmd_t *pmd_table;
22817 -
22818 -#ifdef CONFIG_X86_PAE
22819 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22820 - if (after_bootmem)
22821 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22822 - else
22823 - pmd_table = (pmd_t *)alloc_low_page();
22824 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22825 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22826 - pud = pud_offset(pgd, 0);
22827 - BUG_ON(pmd_table != pmd_offset(pud, 0));
22828 -
22829 - return pmd_table;
22830 - }
22831 -#endif
22832 - pud = pud_offset(pgd, 0);
22833 - pmd_table = pmd_offset(pud, 0);
22834 -
22835 - return pmd_table;
22836 -}
22837 -
22838 -/*
22839 * Create a page table and place a pointer to it in a middle page
22840 * directory entry:
22841 */
22842 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22843 page_table = (pte_t *)alloc_low_page();
22844
22845 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22846 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22847 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22848 +#else
22849 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22850 +#endif
22851 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22852 }
22853
22854 return pte_offset_kernel(pmd, 0);
22855 }
22856
22857 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
22858 +{
22859 + pud_t *pud;
22860 + pmd_t *pmd_table;
22861 +
22862 + pud = pud_offset(pgd, 0);
22863 + pmd_table = pmd_offset(pud, 0);
22864 +
22865 + return pmd_table;
22866 +}
22867 +
22868 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22869 {
22870 int pgd_idx = pgd_index(vaddr);
22871 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22872 int pgd_idx, pmd_idx;
22873 unsigned long vaddr;
22874 pgd_t *pgd;
22875 + pud_t *pud;
22876 pmd_t *pmd;
22877 pte_t *pte = NULL;
22878
22879 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22880 pgd = pgd_base + pgd_idx;
22881
22882 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22883 - pmd = one_md_table_init(pgd);
22884 - pmd = pmd + pmd_index(vaddr);
22885 + pud = pud_offset(pgd, vaddr);
22886 + pmd = pmd_offset(pud, vaddr);
22887 +
22888 +#ifdef CONFIG_X86_PAE
22889 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22890 +#endif
22891 +
22892 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22893 pmd++, pmd_idx++) {
22894 pte = page_table_kmap_check(one_page_table_init(pmd),
22895 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22896 }
22897 }
22898
22899 -static inline int is_kernel_text(unsigned long addr)
22900 +static inline int is_kernel_text(unsigned long start, unsigned long end)
22901 {
22902 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22903 - return 1;
22904 - return 0;
22905 + if ((start > ktla_ktva((unsigned long)_etext) ||
22906 + end <= ktla_ktva((unsigned long)_stext)) &&
22907 + (start > ktla_ktva((unsigned long)_einittext) ||
22908 + end <= ktla_ktva((unsigned long)_sinittext)) &&
22909 +
22910 +#ifdef CONFIG_ACPI_SLEEP
22911 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22912 +#endif
22913 +
22914 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22915 + return 0;
22916 + return 1;
22917 }
22918
22919 /*
22920 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22921 unsigned long last_map_addr = end;
22922 unsigned long start_pfn, end_pfn;
22923 pgd_t *pgd_base = swapper_pg_dir;
22924 - int pgd_idx, pmd_idx, pte_ofs;
22925 + unsigned int pgd_idx, pmd_idx, pte_ofs;
22926 unsigned long pfn;
22927 pgd_t *pgd;
22928 + pud_t *pud;
22929 pmd_t *pmd;
22930 pte_t *pte;
22931 unsigned pages_2m, pages_4k;
22932 @@ -281,8 +282,13 @@ repeat:
22933 pfn = start_pfn;
22934 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22935 pgd = pgd_base + pgd_idx;
22936 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22937 - pmd = one_md_table_init(pgd);
22938 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22939 + pud = pud_offset(pgd, 0);
22940 + pmd = pmd_offset(pud, 0);
22941 +
22942 +#ifdef CONFIG_X86_PAE
22943 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22944 +#endif
22945
22946 if (pfn >= end_pfn)
22947 continue;
22948 @@ -294,14 +300,13 @@ repeat:
22949 #endif
22950 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22951 pmd++, pmd_idx++) {
22952 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22953 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22954
22955 /*
22956 * Map with big pages if possible, otherwise
22957 * create normal page tables:
22958 */
22959 if (use_pse) {
22960 - unsigned int addr2;
22961 pgprot_t prot = PAGE_KERNEL_LARGE;
22962 /*
22963 * first pass will use the same initial
22964 @@ -311,11 +316,7 @@ repeat:
22965 __pgprot(PTE_IDENT_ATTR |
22966 _PAGE_PSE);
22967
22968 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22969 - PAGE_OFFSET + PAGE_SIZE-1;
22970 -
22971 - if (is_kernel_text(addr) ||
22972 - is_kernel_text(addr2))
22973 + if (is_kernel_text(address, address + PMD_SIZE))
22974 prot = PAGE_KERNEL_LARGE_EXEC;
22975
22976 pages_2m++;
22977 @@ -332,7 +333,7 @@ repeat:
22978 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22979 pte += pte_ofs;
22980 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22981 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22982 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22983 pgprot_t prot = PAGE_KERNEL;
22984 /*
22985 * first pass will use the same initial
22986 @@ -340,7 +341,7 @@ repeat:
22987 */
22988 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22989
22990 - if (is_kernel_text(addr))
22991 + if (is_kernel_text(address, address + PAGE_SIZE))
22992 prot = PAGE_KERNEL_EXEC;
22993
22994 pages_4k++;
22995 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22996
22997 pud = pud_offset(pgd, va);
22998 pmd = pmd_offset(pud, va);
22999 - if (!pmd_present(*pmd))
23000 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
23001 break;
23002
23003 pte = pte_offset_kernel(pmd, va);
23004 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
23005
23006 static void __init pagetable_init(void)
23007 {
23008 - pgd_t *pgd_base = swapper_pg_dir;
23009 -
23010 - permanent_kmaps_init(pgd_base);
23011 + permanent_kmaps_init(swapper_pg_dir);
23012 }
23013
23014 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
23015 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
23016 EXPORT_SYMBOL_GPL(__supported_pte_mask);
23017
23018 /* user-defined highmem size */
23019 @@ -757,6 +756,12 @@ void __init mem_init(void)
23020
23021 pci_iommu_alloc();
23022
23023 +#ifdef CONFIG_PAX_PER_CPU_PGD
23024 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
23025 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
23026 + KERNEL_PGD_PTRS);
23027 +#endif
23028 +
23029 #ifdef CONFIG_FLATMEM
23030 BUG_ON(!mem_map);
23031 #endif
23032 @@ -774,7 +779,7 @@ void __init mem_init(void)
23033 set_highmem_pages_init();
23034
23035 codesize = (unsigned long) &_etext - (unsigned long) &_text;
23036 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
23037 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
23038 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
23039
23040 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
23041 @@ -815,10 +820,10 @@ void __init mem_init(void)
23042 ((unsigned long)&__init_end -
23043 (unsigned long)&__init_begin) >> 10,
23044
23045 - (unsigned long)&_etext, (unsigned long)&_edata,
23046 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
23047 + (unsigned long)&_sdata, (unsigned long)&_edata,
23048 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
23049
23050 - (unsigned long)&_text, (unsigned long)&_etext,
23051 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
23052 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
23053
23054 /*
23055 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
23056 if (!kernel_set_to_readonly)
23057 return;
23058
23059 + start = ktla_ktva(start);
23060 pr_debug("Set kernel text: %lx - %lx for read write\n",
23061 start, start+size);
23062
23063 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
23064 if (!kernel_set_to_readonly)
23065 return;
23066
23067 + start = ktla_ktva(start);
23068 pr_debug("Set kernel text: %lx - %lx for read only\n",
23069 start, start+size);
23070
23071 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
23072 unsigned long start = PFN_ALIGN(_text);
23073 unsigned long size = PFN_ALIGN(_etext) - start;
23074
23075 + start = ktla_ktva(start);
23076 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
23077 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
23078 size >> 10);
23079 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
23080 index bbaaa00..796fa65 100644
23081 --- a/arch/x86/mm/init_64.c
23082 +++ b/arch/x86/mm/init_64.c
23083 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
23084 * around without checking the pgd every time.
23085 */
23086
23087 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
23088 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
23089 EXPORT_SYMBOL_GPL(__supported_pte_mask);
23090
23091 int force_personality32;
23092 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
23093
23094 for (address = start; address <= end; address += PGDIR_SIZE) {
23095 const pgd_t *pgd_ref = pgd_offset_k(address);
23096 +
23097 +#ifdef CONFIG_PAX_PER_CPU_PGD
23098 + unsigned long cpu;
23099 +#else
23100 struct page *page;
23101 +#endif
23102
23103 if (pgd_none(*pgd_ref))
23104 continue;
23105
23106 spin_lock(&pgd_lock);
23107 +
23108 +#ifdef CONFIG_PAX_PER_CPU_PGD
23109 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23110 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
23111 +#else
23112 list_for_each_entry(page, &pgd_list, lru) {
23113 pgd_t *pgd;
23114 spinlock_t *pgt_lock;
23115 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
23116 /* the pgt_lock only for Xen */
23117 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23118 spin_lock(pgt_lock);
23119 +#endif
23120
23121 if (pgd_none(*pgd))
23122 set_pgd(pgd, *pgd_ref);
23123 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
23124 BUG_ON(pgd_page_vaddr(*pgd)
23125 != pgd_page_vaddr(*pgd_ref));
23126
23127 +#ifndef CONFIG_PAX_PER_CPU_PGD
23128 spin_unlock(pgt_lock);
23129 +#endif
23130 +
23131 }
23132 spin_unlock(&pgd_lock);
23133 }
23134 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
23135 pmd = fill_pmd(pud, vaddr);
23136 pte = fill_pte(pmd, vaddr);
23137
23138 + pax_open_kernel();
23139 set_pte(pte, new_pte);
23140 + pax_close_kernel();
23141
23142 /*
23143 * It's enough to flush this one mapping.
23144 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
23145 pgd = pgd_offset_k((unsigned long)__va(phys));
23146 if (pgd_none(*pgd)) {
23147 pud = (pud_t *) spp_getpage();
23148 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
23149 - _PAGE_USER));
23150 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
23151 }
23152 pud = pud_offset(pgd, (unsigned long)__va(phys));
23153 if (pud_none(*pud)) {
23154 pmd = (pmd_t *) spp_getpage();
23155 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
23156 - _PAGE_USER));
23157 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
23158 }
23159 pmd = pmd_offset(pud, phys);
23160 BUG_ON(!pmd_none(*pmd));
23161 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
23162 if (pfn >= pgt_buf_top)
23163 panic("alloc_low_page: ran out of memory");
23164
23165 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
23166 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
23167 clear_page(adr);
23168 *phys = pfn * PAGE_SIZE;
23169 return adr;
23170 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
23171
23172 phys = __pa(virt);
23173 left = phys & (PAGE_SIZE - 1);
23174 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
23175 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
23176 adr = (void *)(((unsigned long)adr) | left);
23177
23178 return adr;
23179 @@ -693,6 +707,12 @@ void __init mem_init(void)
23180
23181 pci_iommu_alloc();
23182
23183 +#ifdef CONFIG_PAX_PER_CPU_PGD
23184 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
23185 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
23186 + KERNEL_PGD_PTRS);
23187 +#endif
23188 +
23189 /* clear_bss() already clear the empty_zero_page */
23190
23191 reservedpages = 0;
23192 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
23193 static struct vm_area_struct gate_vma = {
23194 .vm_start = VSYSCALL_START,
23195 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
23196 - .vm_page_prot = PAGE_READONLY_EXEC,
23197 - .vm_flags = VM_READ | VM_EXEC
23198 + .vm_page_prot = PAGE_READONLY,
23199 + .vm_flags = VM_READ
23200 };
23201
23202 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
23203 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
23204
23205 const char *arch_vma_name(struct vm_area_struct *vma)
23206 {
23207 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23208 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23209 return "[vdso]";
23210 if (vma == &gate_vma)
23211 return "[vsyscall]";
23212 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
23213 index 7b179b4..6bd1777 100644
23214 --- a/arch/x86/mm/iomap_32.c
23215 +++ b/arch/x86/mm/iomap_32.c
23216 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
23217 type = kmap_atomic_idx_push();
23218 idx = type + KM_TYPE_NR * smp_processor_id();
23219 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
23220 +
23221 + pax_open_kernel();
23222 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
23223 + pax_close_kernel();
23224 +
23225 arch_flush_lazy_mmu_mode();
23226
23227 return (void *)vaddr;
23228 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
23229 index be1ef57..55f0160 100644
23230 --- a/arch/x86/mm/ioremap.c
23231 +++ b/arch/x86/mm/ioremap.c
23232 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
23233 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
23234 int is_ram = page_is_ram(pfn);
23235
23236 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
23237 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
23238 return NULL;
23239 WARN_ON_ONCE(is_ram);
23240 }
23241 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
23242
23243 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
23244 if (page_is_ram(start >> PAGE_SHIFT))
23245 +#ifdef CONFIG_HIGHMEM
23246 + if ((start >> PAGE_SHIFT) < max_low_pfn)
23247 +#endif
23248 return __va(phys);
23249
23250 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
23251 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
23252 early_param("early_ioremap_debug", early_ioremap_debug_setup);
23253
23254 static __initdata int after_paging_init;
23255 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
23256 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
23257
23258 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
23259 {
23260 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
23261 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
23262
23263 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
23264 - memset(bm_pte, 0, sizeof(bm_pte));
23265 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
23266 + pmd_populate_user(&init_mm, pmd, bm_pte);
23267
23268 /*
23269 * The boot-ioremap range spans multiple pmds, for which
23270 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
23271 index d87dd6d..bf3fa66 100644
23272 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
23273 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
23274 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
23275 * memory (e.g. tracked pages)? For now, we need this to avoid
23276 * invoking kmemcheck for PnP BIOS calls.
23277 */
23278 - if (regs->flags & X86_VM_MASK)
23279 + if (v8086_mode(regs))
23280 return false;
23281 - if (regs->cs != __KERNEL_CS)
23282 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
23283 return false;
23284
23285 pte = kmemcheck_pte_lookup(address);
23286 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
23287 index 845df68..1d8d29f 100644
23288 --- a/arch/x86/mm/mmap.c
23289 +++ b/arch/x86/mm/mmap.c
23290 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
23291 * Leave an at least ~128 MB hole with possible stack randomization.
23292 */
23293 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
23294 -#define MAX_GAP (TASK_SIZE/6*5)
23295 +#define MAX_GAP (pax_task_size/6*5)
23296
23297 static int mmap_is_legacy(void)
23298 {
23299 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
23300 return rnd << PAGE_SHIFT;
23301 }
23302
23303 -static unsigned long mmap_base(void)
23304 +static unsigned long mmap_base(struct mm_struct *mm)
23305 {
23306 unsigned long gap = rlimit(RLIMIT_STACK);
23307 + unsigned long pax_task_size = TASK_SIZE;
23308 +
23309 +#ifdef CONFIG_PAX_SEGMEXEC
23310 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23311 + pax_task_size = SEGMEXEC_TASK_SIZE;
23312 +#endif
23313
23314 if (gap < MIN_GAP)
23315 gap = MIN_GAP;
23316 else if (gap > MAX_GAP)
23317 gap = MAX_GAP;
23318
23319 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
23320 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
23321 }
23322
23323 /*
23324 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
23325 * does, but not when emulating X86_32
23326 */
23327 -static unsigned long mmap_legacy_base(void)
23328 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
23329 {
23330 - if (mmap_is_ia32())
23331 + if (mmap_is_ia32()) {
23332 +
23333 +#ifdef CONFIG_PAX_SEGMEXEC
23334 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23335 + return SEGMEXEC_TASK_UNMAPPED_BASE;
23336 + else
23337 +#endif
23338 +
23339 return TASK_UNMAPPED_BASE;
23340 - else
23341 + } else
23342 return TASK_UNMAPPED_BASE + mmap_rnd();
23343 }
23344
23345 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
23346 void arch_pick_mmap_layout(struct mm_struct *mm)
23347 {
23348 if (mmap_is_legacy()) {
23349 - mm->mmap_base = mmap_legacy_base();
23350 + mm->mmap_base = mmap_legacy_base(mm);
23351 +
23352 +#ifdef CONFIG_PAX_RANDMMAP
23353 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23354 + mm->mmap_base += mm->delta_mmap;
23355 +#endif
23356 +
23357 mm->get_unmapped_area = arch_get_unmapped_area;
23358 mm->unmap_area = arch_unmap_area;
23359 } else {
23360 - mm->mmap_base = mmap_base();
23361 + mm->mmap_base = mmap_base(mm);
23362 +
23363 +#ifdef CONFIG_PAX_RANDMMAP
23364 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23365 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23366 +#endif
23367 +
23368 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23369 mm->unmap_area = arch_unmap_area_topdown;
23370 }
23371 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23372 index de54b9b..799051e 100644
23373 --- a/arch/x86/mm/mmio-mod.c
23374 +++ b/arch/x86/mm/mmio-mod.c
23375 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
23376 break;
23377 default:
23378 {
23379 - unsigned char *ip = (unsigned char *)instptr;
23380 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23381 my_trace->opcode = MMIO_UNKNOWN_OP;
23382 my_trace->width = 0;
23383 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23384 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
23385 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23386 void __iomem *addr)
23387 {
23388 - static atomic_t next_id;
23389 + static atomic_unchecked_t next_id;
23390 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23391 /* These are page-unaligned. */
23392 struct mmiotrace_map map = {
23393 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23394 .private = trace
23395 },
23396 .phys = offset,
23397 - .id = atomic_inc_return(&next_id)
23398 + .id = atomic_inc_return_unchecked(&next_id)
23399 };
23400 map.map_id = trace->id;
23401
23402 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23403 index b008656..773eac2 100644
23404 --- a/arch/x86/mm/pageattr-test.c
23405 +++ b/arch/x86/mm/pageattr-test.c
23406 @@ -36,7 +36,7 @@ enum {
23407
23408 static int pte_testbit(pte_t pte)
23409 {
23410 - return pte_flags(pte) & _PAGE_UNUSED1;
23411 + return pte_flags(pte) & _PAGE_CPA_TEST;
23412 }
23413
23414 struct split_state {
23415 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23416 index f9e5267..77b1a40 100644
23417 --- a/arch/x86/mm/pageattr.c
23418 +++ b/arch/x86/mm/pageattr.c
23419 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23420 */
23421 #ifdef CONFIG_PCI_BIOS
23422 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23423 - pgprot_val(forbidden) |= _PAGE_NX;
23424 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23425 #endif
23426
23427 /*
23428 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23429 * Does not cover __inittext since that is gone later on. On
23430 * 64bit we do not enforce !NX on the low mapping
23431 */
23432 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
23433 - pgprot_val(forbidden) |= _PAGE_NX;
23434 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23435 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23436
23437 +#ifdef CONFIG_DEBUG_RODATA
23438 /*
23439 * The .rodata section needs to be read-only. Using the pfn
23440 * catches all aliases.
23441 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23442 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23443 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23444 pgprot_val(forbidden) |= _PAGE_RW;
23445 +#endif
23446
23447 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23448 /*
23449 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23450 }
23451 #endif
23452
23453 +#ifdef CONFIG_PAX_KERNEXEC
23454 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23455 + pgprot_val(forbidden) |= _PAGE_RW;
23456 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23457 + }
23458 +#endif
23459 +
23460 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23461
23462 return prot;
23463 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23464 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23465 {
23466 /* change init_mm */
23467 + pax_open_kernel();
23468 set_pte_atomic(kpte, pte);
23469 +
23470 #ifdef CONFIG_X86_32
23471 if (!SHARED_KERNEL_PMD) {
23472 +
23473 +#ifdef CONFIG_PAX_PER_CPU_PGD
23474 + unsigned long cpu;
23475 +#else
23476 struct page *page;
23477 +#endif
23478
23479 +#ifdef CONFIG_PAX_PER_CPU_PGD
23480 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23481 + pgd_t *pgd = get_cpu_pgd(cpu);
23482 +#else
23483 list_for_each_entry(page, &pgd_list, lru) {
23484 - pgd_t *pgd;
23485 + pgd_t *pgd = (pgd_t *)page_address(page);
23486 +#endif
23487 +
23488 pud_t *pud;
23489 pmd_t *pmd;
23490
23491 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
23492 + pgd += pgd_index(address);
23493 pud = pud_offset(pgd, address);
23494 pmd = pmd_offset(pud, address);
23495 set_pte_atomic((pte_t *)pmd, pte);
23496 }
23497 }
23498 #endif
23499 + pax_close_kernel();
23500 }
23501
23502 static int
23503 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23504 index f6ff57b..481690f 100644
23505 --- a/arch/x86/mm/pat.c
23506 +++ b/arch/x86/mm/pat.c
23507 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23508
23509 if (!entry) {
23510 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23511 - current->comm, current->pid, start, end);
23512 + current->comm, task_pid_nr(current), start, end);
23513 return -EINVAL;
23514 }
23515
23516 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23517 while (cursor < to) {
23518 if (!devmem_is_allowed(pfn)) {
23519 printk(KERN_INFO
23520 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23521 - current->comm, from, to);
23522 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23523 + current->comm, from, to, cursor);
23524 return 0;
23525 }
23526 cursor += PAGE_SIZE;
23527 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23528 printk(KERN_INFO
23529 "%s:%d ioremap_change_attr failed %s "
23530 "for %Lx-%Lx\n",
23531 - current->comm, current->pid,
23532 + current->comm, task_pid_nr(current),
23533 cattr_name(flags),
23534 base, (unsigned long long)(base + size));
23535 return -EINVAL;
23536 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23537 if (want_flags != flags) {
23538 printk(KERN_WARNING
23539 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23540 - current->comm, current->pid,
23541 + current->comm, task_pid_nr(current),
23542 cattr_name(want_flags),
23543 (unsigned long long)paddr,
23544 (unsigned long long)(paddr + size),
23545 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23546 free_memtype(paddr, paddr + size);
23547 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23548 " for %Lx-%Lx, got %s\n",
23549 - current->comm, current->pid,
23550 + current->comm, task_pid_nr(current),
23551 cattr_name(want_flags),
23552 (unsigned long long)paddr,
23553 (unsigned long long)(paddr + size),
23554 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23555 index 9f0614d..92ae64a 100644
23556 --- a/arch/x86/mm/pf_in.c
23557 +++ b/arch/x86/mm/pf_in.c
23558 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23559 int i;
23560 enum reason_type rv = OTHERS;
23561
23562 - p = (unsigned char *)ins_addr;
23563 + p = (unsigned char *)ktla_ktva(ins_addr);
23564 p += skip_prefix(p, &prf);
23565 p += get_opcode(p, &opcode);
23566
23567 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23568 struct prefix_bits prf;
23569 int i;
23570
23571 - p = (unsigned char *)ins_addr;
23572 + p = (unsigned char *)ktla_ktva(ins_addr);
23573 p += skip_prefix(p, &prf);
23574 p += get_opcode(p, &opcode);
23575
23576 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23577 struct prefix_bits prf;
23578 int i;
23579
23580 - p = (unsigned char *)ins_addr;
23581 + p = (unsigned char *)ktla_ktva(ins_addr);
23582 p += skip_prefix(p, &prf);
23583 p += get_opcode(p, &opcode);
23584
23585 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23586 struct prefix_bits prf;
23587 int i;
23588
23589 - p = (unsigned char *)ins_addr;
23590 + p = (unsigned char *)ktla_ktva(ins_addr);
23591 p += skip_prefix(p, &prf);
23592 p += get_opcode(p, &opcode);
23593 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23594 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23595 struct prefix_bits prf;
23596 int i;
23597
23598 - p = (unsigned char *)ins_addr;
23599 + p = (unsigned char *)ktla_ktva(ins_addr);
23600 p += skip_prefix(p, &prf);
23601 p += get_opcode(p, &opcode);
23602 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23603 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23604 index 8573b83..c3b1a30 100644
23605 --- a/arch/x86/mm/pgtable.c
23606 +++ b/arch/x86/mm/pgtable.c
23607 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23608 list_del(&page->lru);
23609 }
23610
23611 -#define UNSHARED_PTRS_PER_PGD \
23612 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23613 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23614 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23615
23616 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23617 +{
23618 + while (count--)
23619 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23620 +}
23621 +#endif
23622
23623 +#ifdef CONFIG_PAX_PER_CPU_PGD
23624 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23625 +{
23626 + while (count--)
23627 +
23628 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23629 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23630 +#else
23631 + *dst++ = *src++;
23632 +#endif
23633 +
23634 +}
23635 +#endif
23636 +
23637 +#ifdef CONFIG_X86_64
23638 +#define pxd_t pud_t
23639 +#define pyd_t pgd_t
23640 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23641 +#define pxd_free(mm, pud) pud_free((mm), (pud))
23642 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23643 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
23644 +#define PYD_SIZE PGDIR_SIZE
23645 +#else
23646 +#define pxd_t pmd_t
23647 +#define pyd_t pud_t
23648 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23649 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
23650 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23651 +#define pyd_offset(mm, address) pud_offset((mm), (address))
23652 +#define PYD_SIZE PUD_SIZE
23653 +#endif
23654 +
23655 +#ifdef CONFIG_PAX_PER_CPU_PGD
23656 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23657 +static inline void pgd_dtor(pgd_t *pgd) {}
23658 +#else
23659 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23660 {
23661 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23662 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23663 pgd_list_del(pgd);
23664 spin_unlock(&pgd_lock);
23665 }
23666 +#endif
23667
23668 /*
23669 * List of all pgd's needed for non-PAE so it can invalidate entries
23670 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23671 * -- wli
23672 */
23673
23674 -#ifdef CONFIG_X86_PAE
23675 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23676 /*
23677 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23678 * updating the top-level pagetable entries to guarantee the
23679 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23680 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23681 * and initialize the kernel pmds here.
23682 */
23683 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23684 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23685
23686 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23687 {
23688 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23689 */
23690 flush_tlb_mm(mm);
23691 }
23692 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23693 +#define PREALLOCATED_PXDS USER_PGD_PTRS
23694 #else /* !CONFIG_X86_PAE */
23695
23696 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23697 -#define PREALLOCATED_PMDS 0
23698 +#define PREALLOCATED_PXDS 0
23699
23700 #endif /* CONFIG_X86_PAE */
23701
23702 -static void free_pmds(pmd_t *pmds[])
23703 +static void free_pxds(pxd_t *pxds[])
23704 {
23705 int i;
23706
23707 - for(i = 0; i < PREALLOCATED_PMDS; i++)
23708 - if (pmds[i])
23709 - free_page((unsigned long)pmds[i]);
23710 + for(i = 0; i < PREALLOCATED_PXDS; i++)
23711 + if (pxds[i])
23712 + free_page((unsigned long)pxds[i]);
23713 }
23714
23715 -static int preallocate_pmds(pmd_t *pmds[])
23716 +static int preallocate_pxds(pxd_t *pxds[])
23717 {
23718 int i;
23719 bool failed = false;
23720
23721 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23722 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23723 - if (pmd == NULL)
23724 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23725 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23726 + if (pxd == NULL)
23727 failed = true;
23728 - pmds[i] = pmd;
23729 + pxds[i] = pxd;
23730 }
23731
23732 if (failed) {
23733 - free_pmds(pmds);
23734 + free_pxds(pxds);
23735 return -ENOMEM;
23736 }
23737
23738 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23739 * preallocate which never got a corresponding vma will need to be
23740 * freed manually.
23741 */
23742 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23743 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23744 {
23745 int i;
23746
23747 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23748 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23749 pgd_t pgd = pgdp[i];
23750
23751 if (pgd_val(pgd) != 0) {
23752 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23753 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23754
23755 - pgdp[i] = native_make_pgd(0);
23756 + set_pgd(pgdp + i, native_make_pgd(0));
23757
23758 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23759 - pmd_free(mm, pmd);
23760 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23761 + pxd_free(mm, pxd);
23762 }
23763 }
23764 }
23765
23766 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23767 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23768 {
23769 - pud_t *pud;
23770 + pyd_t *pyd;
23771 unsigned long addr;
23772 int i;
23773
23774 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23775 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23776 return;
23777
23778 - pud = pud_offset(pgd, 0);
23779 +#ifdef CONFIG_X86_64
23780 + pyd = pyd_offset(mm, 0L);
23781 +#else
23782 + pyd = pyd_offset(pgd, 0L);
23783 +#endif
23784
23785 - for (addr = i = 0; i < PREALLOCATED_PMDS;
23786 - i++, pud++, addr += PUD_SIZE) {
23787 - pmd_t *pmd = pmds[i];
23788 + for (addr = i = 0; i < PREALLOCATED_PXDS;
23789 + i++, pyd++, addr += PYD_SIZE) {
23790 + pxd_t *pxd = pxds[i];
23791
23792 if (i >= KERNEL_PGD_BOUNDARY)
23793 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23794 - sizeof(pmd_t) * PTRS_PER_PMD);
23795 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23796 + sizeof(pxd_t) * PTRS_PER_PMD);
23797
23798 - pud_populate(mm, pud, pmd);
23799 + pyd_populate(mm, pyd, pxd);
23800 }
23801 }
23802
23803 pgd_t *pgd_alloc(struct mm_struct *mm)
23804 {
23805 pgd_t *pgd;
23806 - pmd_t *pmds[PREALLOCATED_PMDS];
23807 + pxd_t *pxds[PREALLOCATED_PXDS];
23808
23809 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23810
23811 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23812
23813 mm->pgd = pgd;
23814
23815 - if (preallocate_pmds(pmds) != 0)
23816 + if (preallocate_pxds(pxds) != 0)
23817 goto out_free_pgd;
23818
23819 if (paravirt_pgd_alloc(mm) != 0)
23820 - goto out_free_pmds;
23821 + goto out_free_pxds;
23822
23823 /*
23824 * Make sure that pre-populating the pmds is atomic with
23825 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23826 spin_lock(&pgd_lock);
23827
23828 pgd_ctor(mm, pgd);
23829 - pgd_prepopulate_pmd(mm, pgd, pmds);
23830 + pgd_prepopulate_pxd(mm, pgd, pxds);
23831
23832 spin_unlock(&pgd_lock);
23833
23834 return pgd;
23835
23836 -out_free_pmds:
23837 - free_pmds(pmds);
23838 +out_free_pxds:
23839 + free_pxds(pxds);
23840 out_free_pgd:
23841 free_page((unsigned long)pgd);
23842 out:
23843 @@ -295,7 +344,7 @@ out:
23844
23845 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23846 {
23847 - pgd_mop_up_pmds(mm, pgd);
23848 + pgd_mop_up_pxds(mm, pgd);
23849 pgd_dtor(pgd);
23850 paravirt_pgd_free(mm, pgd);
23851 free_page((unsigned long)pgd);
23852 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23853 index cac7184..09a39fa 100644
23854 --- a/arch/x86/mm/pgtable_32.c
23855 +++ b/arch/x86/mm/pgtable_32.c
23856 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23857 return;
23858 }
23859 pte = pte_offset_kernel(pmd, vaddr);
23860 +
23861 + pax_open_kernel();
23862 if (pte_val(pteval))
23863 set_pte_at(&init_mm, vaddr, pte, pteval);
23864 else
23865 pte_clear(&init_mm, vaddr, pte);
23866 + pax_close_kernel();
23867
23868 /*
23869 * It's enough to flush this one mapping.
23870 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23871 index 410531d..0f16030 100644
23872 --- a/arch/x86/mm/setup_nx.c
23873 +++ b/arch/x86/mm/setup_nx.c
23874 @@ -5,8 +5,10 @@
23875 #include <asm/pgtable.h>
23876 #include <asm/proto.h>
23877
23878 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23879 static int disable_nx __cpuinitdata;
23880
23881 +#ifndef CONFIG_PAX_PAGEEXEC
23882 /*
23883 * noexec = on|off
23884 *
23885 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23886 return 0;
23887 }
23888 early_param("noexec", noexec_setup);
23889 +#endif
23890 +
23891 +#endif
23892
23893 void __cpuinit x86_configure_nx(void)
23894 {
23895 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23896 if (cpu_has_nx && !disable_nx)
23897 __supported_pte_mask |= _PAGE_NX;
23898 else
23899 +#endif
23900 __supported_pte_mask &= ~_PAGE_NX;
23901 }
23902
23903 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23904 index d6c0418..06a0ad5 100644
23905 --- a/arch/x86/mm/tlb.c
23906 +++ b/arch/x86/mm/tlb.c
23907 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
23908 BUG();
23909 cpumask_clear_cpu(cpu,
23910 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23911 +
23912 +#ifndef CONFIG_PAX_PER_CPU_PGD
23913 load_cr3(swapper_pg_dir);
23914 +#endif
23915 +
23916 }
23917 EXPORT_SYMBOL_GPL(leave_mm);
23918
23919 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23920 index 6687022..ceabcfa 100644
23921 --- a/arch/x86/net/bpf_jit.S
23922 +++ b/arch/x86/net/bpf_jit.S
23923 @@ -9,6 +9,7 @@
23924 */
23925 #include <linux/linkage.h>
23926 #include <asm/dwarf2.h>
23927 +#include <asm/alternative-asm.h>
23928
23929 /*
23930 * Calling convention :
23931 @@ -35,6 +36,7 @@ sk_load_word:
23932 jle bpf_slow_path_word
23933 mov (SKBDATA,%rsi),%eax
23934 bswap %eax /* ntohl() */
23935 + pax_force_retaddr
23936 ret
23937
23938
23939 @@ -53,6 +55,7 @@ sk_load_half:
23940 jle bpf_slow_path_half
23941 movzwl (SKBDATA,%rsi),%eax
23942 rol $8,%ax # ntohs()
23943 + pax_force_retaddr
23944 ret
23945
23946 sk_load_byte_ind:
23947 @@ -66,6 +69,7 @@ sk_load_byte:
23948 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23949 jle bpf_slow_path_byte
23950 movzbl (SKBDATA,%rsi),%eax
23951 + pax_force_retaddr
23952 ret
23953
23954 /**
23955 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23956 movzbl (SKBDATA,%rsi),%ebx
23957 and $15,%bl
23958 shl $2,%bl
23959 + pax_force_retaddr
23960 ret
23961 CFI_ENDPROC
23962 ENDPROC(sk_load_byte_msh)
23963 @@ -91,6 +96,7 @@ bpf_error:
23964 xor %eax,%eax
23965 mov -8(%rbp),%rbx
23966 leaveq
23967 + pax_force_retaddr
23968 ret
23969
23970 /* rsi contains offset and can be scratched */
23971 @@ -113,6 +119,7 @@ bpf_slow_path_word:
23972 js bpf_error
23973 mov -12(%rbp),%eax
23974 bswap %eax
23975 + pax_force_retaddr
23976 ret
23977
23978 bpf_slow_path_half:
23979 @@ -121,12 +128,14 @@ bpf_slow_path_half:
23980 mov -12(%rbp),%ax
23981 rol $8,%ax
23982 movzwl %ax,%eax
23983 + pax_force_retaddr
23984 ret
23985
23986 bpf_slow_path_byte:
23987 bpf_slow_path_common(1)
23988 js bpf_error
23989 movzbl -12(%rbp),%eax
23990 + pax_force_retaddr
23991 ret
23992
23993 bpf_slow_path_byte_msh:
23994 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23995 and $15,%al
23996 shl $2,%al
23997 xchg %eax,%ebx
23998 + pax_force_retaddr
23999 ret
24000 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
24001 index 7c1b765..8c072c6 100644
24002 --- a/arch/x86/net/bpf_jit_comp.c
24003 +++ b/arch/x86/net/bpf_jit_comp.c
24004 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
24005 set_fs(old_fs);
24006 }
24007
24008 +struct bpf_jit_work {
24009 + struct work_struct work;
24010 + void *image;
24011 +};
24012
24013 void bpf_jit_compile(struct sk_filter *fp)
24014 {
24015 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
24016 if (addrs == NULL)
24017 return;
24018
24019 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
24020 + if (!fp->work)
24021 + goto out;
24022 +
24023 /* Before first pass, make a rough estimation of addrs[]
24024 * each bpf instruction is translated to less than 64 bytes
24025 */
24026 @@ -476,7 +484,7 @@ void bpf_jit_compile(struct sk_filter *fp)
24027 func = sk_load_word;
24028 common_load: seen |= SEEN_DATAREF;
24029 if ((int)K < 0)
24030 - goto out;
24031 + goto error;
24032 t_offset = func - (image + addrs[i]);
24033 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
24034 EMIT1_off32(0xe8, t_offset); /* call */
24035 @@ -586,17 +594,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
24036 break;
24037 default:
24038 /* hmm, too complex filter, give up with jit compiler */
24039 - goto out;
24040 + goto error;
24041 }
24042 ilen = prog - temp;
24043 if (image) {
24044 if (unlikely(proglen + ilen > oldproglen)) {
24045 pr_err("bpb_jit_compile fatal error\n");
24046 - kfree(addrs);
24047 - module_free(NULL, image);
24048 - return;
24049 + module_free_exec(NULL, image);
24050 + goto error;
24051 }
24052 + pax_open_kernel();
24053 memcpy(image + proglen, temp, ilen);
24054 + pax_close_kernel();
24055 }
24056 proglen += ilen;
24057 addrs[i] = proglen;
24058 @@ -617,11 +626,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
24059 break;
24060 }
24061 if (proglen == oldproglen) {
24062 - image = module_alloc(max_t(unsigned int,
24063 - proglen,
24064 - sizeof(struct work_struct)));
24065 + image = module_alloc_exec(proglen);
24066 if (!image)
24067 - goto out;
24068 + goto error;
24069 }
24070 oldproglen = proglen;
24071 }
24072 @@ -637,7 +644,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
24073 bpf_flush_icache(image, image + proglen);
24074
24075 fp->bpf_func = (void *)image;
24076 - }
24077 + } else
24078 +error:
24079 + kfree(fp->work);
24080 +
24081 out:
24082 kfree(addrs);
24083 return;
24084 @@ -645,18 +655,20 @@ out:
24085
24086 static void jit_free_defer(struct work_struct *arg)
24087 {
24088 - module_free(NULL, arg);
24089 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
24090 + kfree(arg);
24091 }
24092
24093 /* run from softirq, we must use a work_struct to call
24094 - * module_free() from process context
24095 + * module_free_exec() from process context
24096 */
24097 void bpf_jit_free(struct sk_filter *fp)
24098 {
24099 if (fp->bpf_func != sk_run_filter) {
24100 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
24101 + struct work_struct *work = &fp->work->work;
24102
24103 INIT_WORK(work, jit_free_defer);
24104 + fp->work->image = fp->bpf_func;
24105 schedule_work(work);
24106 }
24107 }
24108 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
24109 index bff89df..377758a 100644
24110 --- a/arch/x86/oprofile/backtrace.c
24111 +++ b/arch/x86/oprofile/backtrace.c
24112 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
24113 struct stack_frame_ia32 *fp;
24114 unsigned long bytes;
24115
24116 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
24117 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
24118 if (bytes != sizeof(bufhead))
24119 return NULL;
24120
24121 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
24122 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
24123
24124 oprofile_add_trace(bufhead[0].return_address);
24125
24126 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
24127 struct stack_frame bufhead[2];
24128 unsigned long bytes;
24129
24130 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
24131 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
24132 if (bytes != sizeof(bufhead))
24133 return NULL;
24134
24135 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
24136 {
24137 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
24138
24139 - if (!user_mode_vm(regs)) {
24140 + if (!user_mode(regs)) {
24141 unsigned long stack = kernel_stack_pointer(regs);
24142 if (depth)
24143 dump_trace(NULL, regs, (unsigned long *)stack, 0,
24144 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
24145 index cb29191..036766d 100644
24146 --- a/arch/x86/pci/mrst.c
24147 +++ b/arch/x86/pci/mrst.c
24148 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
24149 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
24150 pci_mmcfg_late_init();
24151 pcibios_enable_irq = mrst_pci_irq_enable;
24152 - pci_root_ops = pci_mrst_ops;
24153 + pax_open_kernel();
24154 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
24155 + pax_close_kernel();
24156 /* Continue with standard init */
24157 return 1;
24158 }
24159 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
24160 index db0e9a5..0372c14 100644
24161 --- a/arch/x86/pci/pcbios.c
24162 +++ b/arch/x86/pci/pcbios.c
24163 @@ -79,50 +79,93 @@ union bios32 {
24164 static struct {
24165 unsigned long address;
24166 unsigned short segment;
24167 -} bios32_indirect = { 0, __KERNEL_CS };
24168 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
24169
24170 /*
24171 * Returns the entry point for the given service, NULL on error
24172 */
24173
24174 -static unsigned long bios32_service(unsigned long service)
24175 +static unsigned long __devinit bios32_service(unsigned long service)
24176 {
24177 unsigned char return_code; /* %al */
24178 unsigned long address; /* %ebx */
24179 unsigned long length; /* %ecx */
24180 unsigned long entry; /* %edx */
24181 unsigned long flags;
24182 + struct desc_struct d, *gdt;
24183
24184 local_irq_save(flags);
24185 - __asm__("lcall *(%%edi); cld"
24186 +
24187 + gdt = get_cpu_gdt_table(smp_processor_id());
24188 +
24189 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
24190 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
24191 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
24192 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
24193 +
24194 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
24195 : "=a" (return_code),
24196 "=b" (address),
24197 "=c" (length),
24198 "=d" (entry)
24199 : "0" (service),
24200 "1" (0),
24201 - "D" (&bios32_indirect));
24202 + "D" (&bios32_indirect),
24203 + "r"(__PCIBIOS_DS)
24204 + : "memory");
24205 +
24206 + pax_open_kernel();
24207 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
24208 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
24209 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
24210 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
24211 + pax_close_kernel();
24212 +
24213 local_irq_restore(flags);
24214
24215 switch (return_code) {
24216 - case 0:
24217 - return address + entry;
24218 - case 0x80: /* Not present */
24219 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
24220 - return 0;
24221 - default: /* Shouldn't happen */
24222 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
24223 - service, return_code);
24224 + case 0: {
24225 + int cpu;
24226 + unsigned char flags;
24227 +
24228 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
24229 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
24230 + printk(KERN_WARNING "bios32_service: not valid\n");
24231 return 0;
24232 + }
24233 + address = address + PAGE_OFFSET;
24234 + length += 16UL; /* some BIOSs underreport this... */
24235 + flags = 4;
24236 + if (length >= 64*1024*1024) {
24237 + length >>= PAGE_SHIFT;
24238 + flags |= 8;
24239 + }
24240 +
24241 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24242 + gdt = get_cpu_gdt_table(cpu);
24243 + pack_descriptor(&d, address, length, 0x9b, flags);
24244 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
24245 + pack_descriptor(&d, address, length, 0x93, flags);
24246 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
24247 + }
24248 + return entry;
24249 + }
24250 + case 0x80: /* Not present */
24251 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
24252 + return 0;
24253 + default: /* Shouldn't happen */
24254 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
24255 + service, return_code);
24256 + return 0;
24257 }
24258 }
24259
24260 static struct {
24261 unsigned long address;
24262 unsigned short segment;
24263 -} pci_indirect = { 0, __KERNEL_CS };
24264 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
24265
24266 -static int pci_bios_present;
24267 +static int pci_bios_present __read_only;
24268
24269 static int __devinit check_pcibios(void)
24270 {
24271 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
24272 unsigned long flags, pcibios_entry;
24273
24274 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
24275 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
24276 + pci_indirect.address = pcibios_entry;
24277
24278 local_irq_save(flags);
24279 - __asm__(
24280 - "lcall *(%%edi); cld\n\t"
24281 + __asm__("movw %w6, %%ds\n\t"
24282 + "lcall *%%ss:(%%edi); cld\n\t"
24283 + "push %%ss\n\t"
24284 + "pop %%ds\n\t"
24285 "jc 1f\n\t"
24286 "xor %%ah, %%ah\n"
24287 "1:"
24288 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
24289 "=b" (ebx),
24290 "=c" (ecx)
24291 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
24292 - "D" (&pci_indirect)
24293 + "D" (&pci_indirect),
24294 + "r" (__PCIBIOS_DS)
24295 : "memory");
24296 local_irq_restore(flags);
24297
24298 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24299
24300 switch (len) {
24301 case 1:
24302 - __asm__("lcall *(%%esi); cld\n\t"
24303 + __asm__("movw %w6, %%ds\n\t"
24304 + "lcall *%%ss:(%%esi); cld\n\t"
24305 + "push %%ss\n\t"
24306 + "pop %%ds\n\t"
24307 "jc 1f\n\t"
24308 "xor %%ah, %%ah\n"
24309 "1:"
24310 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24311 : "1" (PCIBIOS_READ_CONFIG_BYTE),
24312 "b" (bx),
24313 "D" ((long)reg),
24314 - "S" (&pci_indirect));
24315 + "S" (&pci_indirect),
24316 + "r" (__PCIBIOS_DS));
24317 /*
24318 * Zero-extend the result beyond 8 bits, do not trust the
24319 * BIOS having done it:
24320 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24321 *value &= 0xff;
24322 break;
24323 case 2:
24324 - __asm__("lcall *(%%esi); cld\n\t"
24325 + __asm__("movw %w6, %%ds\n\t"
24326 + "lcall *%%ss:(%%esi); cld\n\t"
24327 + "push %%ss\n\t"
24328 + "pop %%ds\n\t"
24329 "jc 1f\n\t"
24330 "xor %%ah, %%ah\n"
24331 "1:"
24332 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24333 : "1" (PCIBIOS_READ_CONFIG_WORD),
24334 "b" (bx),
24335 "D" ((long)reg),
24336 - "S" (&pci_indirect));
24337 + "S" (&pci_indirect),
24338 + "r" (__PCIBIOS_DS));
24339 /*
24340 * Zero-extend the result beyond 16 bits, do not trust the
24341 * BIOS having done it:
24342 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24343 *value &= 0xffff;
24344 break;
24345 case 4:
24346 - __asm__("lcall *(%%esi); cld\n\t"
24347 + __asm__("movw %w6, %%ds\n\t"
24348 + "lcall *%%ss:(%%esi); cld\n\t"
24349 + "push %%ss\n\t"
24350 + "pop %%ds\n\t"
24351 "jc 1f\n\t"
24352 "xor %%ah, %%ah\n"
24353 "1:"
24354 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24355 : "1" (PCIBIOS_READ_CONFIG_DWORD),
24356 "b" (bx),
24357 "D" ((long)reg),
24358 - "S" (&pci_indirect));
24359 + "S" (&pci_indirect),
24360 + "r" (__PCIBIOS_DS));
24361 break;
24362 }
24363
24364 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24365
24366 switch (len) {
24367 case 1:
24368 - __asm__("lcall *(%%esi); cld\n\t"
24369 + __asm__("movw %w6, %%ds\n\t"
24370 + "lcall *%%ss:(%%esi); cld\n\t"
24371 + "push %%ss\n\t"
24372 + "pop %%ds\n\t"
24373 "jc 1f\n\t"
24374 "xor %%ah, %%ah\n"
24375 "1:"
24376 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24377 "c" (value),
24378 "b" (bx),
24379 "D" ((long)reg),
24380 - "S" (&pci_indirect));
24381 + "S" (&pci_indirect),
24382 + "r" (__PCIBIOS_DS));
24383 break;
24384 case 2:
24385 - __asm__("lcall *(%%esi); cld\n\t"
24386 + __asm__("movw %w6, %%ds\n\t"
24387 + "lcall *%%ss:(%%esi); cld\n\t"
24388 + "push %%ss\n\t"
24389 + "pop %%ds\n\t"
24390 "jc 1f\n\t"
24391 "xor %%ah, %%ah\n"
24392 "1:"
24393 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24394 "c" (value),
24395 "b" (bx),
24396 "D" ((long)reg),
24397 - "S" (&pci_indirect));
24398 + "S" (&pci_indirect),
24399 + "r" (__PCIBIOS_DS));
24400 break;
24401 case 4:
24402 - __asm__("lcall *(%%esi); cld\n\t"
24403 + __asm__("movw %w6, %%ds\n\t"
24404 + "lcall *%%ss:(%%esi); cld\n\t"
24405 + "push %%ss\n\t"
24406 + "pop %%ds\n\t"
24407 "jc 1f\n\t"
24408 "xor %%ah, %%ah\n"
24409 "1:"
24410 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24411 "c" (value),
24412 "b" (bx),
24413 "D" ((long)reg),
24414 - "S" (&pci_indirect));
24415 + "S" (&pci_indirect),
24416 + "r" (__PCIBIOS_DS));
24417 break;
24418 }
24419
24420 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24421
24422 DBG("PCI: Fetching IRQ routing table... ");
24423 __asm__("push %%es\n\t"
24424 + "movw %w8, %%ds\n\t"
24425 "push %%ds\n\t"
24426 "pop %%es\n\t"
24427 - "lcall *(%%esi); cld\n\t"
24428 + "lcall *%%ss:(%%esi); cld\n\t"
24429 "pop %%es\n\t"
24430 + "push %%ss\n\t"
24431 + "pop %%ds\n"
24432 "jc 1f\n\t"
24433 "xor %%ah, %%ah\n"
24434 "1:"
24435 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24436 "1" (0),
24437 "D" ((long) &opt),
24438 "S" (&pci_indirect),
24439 - "m" (opt)
24440 + "m" (opt),
24441 + "r" (__PCIBIOS_DS)
24442 : "memory");
24443 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24444 if (ret & 0xff00)
24445 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24446 {
24447 int ret;
24448
24449 - __asm__("lcall *(%%esi); cld\n\t"
24450 + __asm__("movw %w5, %%ds\n\t"
24451 + "lcall *%%ss:(%%esi); cld\n\t"
24452 + "push %%ss\n\t"
24453 + "pop %%ds\n"
24454 "jc 1f\n\t"
24455 "xor %%ah, %%ah\n"
24456 "1:"
24457 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24458 : "0" (PCIBIOS_SET_PCI_HW_INT),
24459 "b" ((dev->bus->number << 8) | dev->devfn),
24460 "c" ((irq << 8) | (pin + 10)),
24461 - "S" (&pci_indirect));
24462 + "S" (&pci_indirect),
24463 + "r" (__PCIBIOS_DS));
24464 return !(ret & 0xff00);
24465 }
24466 EXPORT_SYMBOL(pcibios_set_irq_routing);
24467 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24468 index 40e4469..1ab536e 100644
24469 --- a/arch/x86/platform/efi/efi_32.c
24470 +++ b/arch/x86/platform/efi/efi_32.c
24471 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
24472 {
24473 struct desc_ptr gdt_descr;
24474
24475 +#ifdef CONFIG_PAX_KERNEXEC
24476 + struct desc_struct d;
24477 +#endif
24478 +
24479 local_irq_save(efi_rt_eflags);
24480
24481 load_cr3(initial_page_table);
24482 __flush_tlb_all();
24483
24484 +#ifdef CONFIG_PAX_KERNEXEC
24485 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24486 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24487 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24488 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24489 +#endif
24490 +
24491 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24492 gdt_descr.size = GDT_SIZE - 1;
24493 load_gdt(&gdt_descr);
24494 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
24495 {
24496 struct desc_ptr gdt_descr;
24497
24498 +#ifdef CONFIG_PAX_KERNEXEC
24499 + struct desc_struct d;
24500 +
24501 + memset(&d, 0, sizeof d);
24502 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24503 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24504 +#endif
24505 +
24506 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24507 gdt_descr.size = GDT_SIZE - 1;
24508 load_gdt(&gdt_descr);
24509 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24510 index fbe66e6..c5c0dd2 100644
24511 --- a/arch/x86/platform/efi/efi_stub_32.S
24512 +++ b/arch/x86/platform/efi/efi_stub_32.S
24513 @@ -6,7 +6,9 @@
24514 */
24515
24516 #include <linux/linkage.h>
24517 +#include <linux/init.h>
24518 #include <asm/page_types.h>
24519 +#include <asm/segment.h>
24520
24521 /*
24522 * efi_call_phys(void *, ...) is a function with variable parameters.
24523 @@ -20,7 +22,7 @@
24524 * service functions will comply with gcc calling convention, too.
24525 */
24526
24527 -.text
24528 +__INIT
24529 ENTRY(efi_call_phys)
24530 /*
24531 * 0. The function can only be called in Linux kernel. So CS has been
24532 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24533 * The mapping of lower virtual memory has been created in prelog and
24534 * epilog.
24535 */
24536 - movl $1f, %edx
24537 - subl $__PAGE_OFFSET, %edx
24538 - jmp *%edx
24539 + movl $(__KERNEXEC_EFI_DS), %edx
24540 + mov %edx, %ds
24541 + mov %edx, %es
24542 + mov %edx, %ss
24543 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24544 1:
24545
24546 /*
24547 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24548 * parameter 2, ..., param n. To make things easy, we save the return
24549 * address of efi_call_phys in a global variable.
24550 */
24551 - popl %edx
24552 - movl %edx, saved_return_addr
24553 - /* get the function pointer into ECX*/
24554 - popl %ecx
24555 - movl %ecx, efi_rt_function_ptr
24556 - movl $2f, %edx
24557 - subl $__PAGE_OFFSET, %edx
24558 - pushl %edx
24559 + popl (saved_return_addr)
24560 + popl (efi_rt_function_ptr)
24561
24562 /*
24563 * 3. Clear PG bit in %CR0.
24564 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24565 /*
24566 * 5. Call the physical function.
24567 */
24568 - jmp *%ecx
24569 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
24570
24571 -2:
24572 /*
24573 * 6. After EFI runtime service returns, control will return to
24574 * following instruction. We'd better readjust stack pointer first.
24575 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24576 movl %cr0, %edx
24577 orl $0x80000000, %edx
24578 movl %edx, %cr0
24579 - jmp 1f
24580 -1:
24581 +
24582 /*
24583 * 8. Now restore the virtual mode from flat mode by
24584 * adding EIP with PAGE_OFFSET.
24585 */
24586 - movl $1f, %edx
24587 - jmp *%edx
24588 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24589 1:
24590 + movl $(__KERNEL_DS), %edx
24591 + mov %edx, %ds
24592 + mov %edx, %es
24593 + mov %edx, %ss
24594
24595 /*
24596 * 9. Balance the stack. And because EAX contain the return value,
24597 * we'd better not clobber it.
24598 */
24599 - leal efi_rt_function_ptr, %edx
24600 - movl (%edx), %ecx
24601 - pushl %ecx
24602 + pushl (efi_rt_function_ptr)
24603
24604 /*
24605 - * 10. Push the saved return address onto the stack and return.
24606 + * 10. Return to the saved return address.
24607 */
24608 - leal saved_return_addr, %edx
24609 - movl (%edx), %ecx
24610 - pushl %ecx
24611 - ret
24612 + jmpl *(saved_return_addr)
24613 ENDPROC(efi_call_phys)
24614 .previous
24615
24616 -.data
24617 +__INITDATA
24618 saved_return_addr:
24619 .long 0
24620 efi_rt_function_ptr:
24621 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24622 index 4c07cca..2c8427d 100644
24623 --- a/arch/x86/platform/efi/efi_stub_64.S
24624 +++ b/arch/x86/platform/efi/efi_stub_64.S
24625 @@ -7,6 +7,7 @@
24626 */
24627
24628 #include <linux/linkage.h>
24629 +#include <asm/alternative-asm.h>
24630
24631 #define SAVE_XMM \
24632 mov %rsp, %rax; \
24633 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
24634 call *%rdi
24635 addq $32, %rsp
24636 RESTORE_XMM
24637 + pax_force_retaddr 0, 1
24638 ret
24639 ENDPROC(efi_call0)
24640
24641 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
24642 call *%rdi
24643 addq $32, %rsp
24644 RESTORE_XMM
24645 + pax_force_retaddr 0, 1
24646 ret
24647 ENDPROC(efi_call1)
24648
24649 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
24650 call *%rdi
24651 addq $32, %rsp
24652 RESTORE_XMM
24653 + pax_force_retaddr 0, 1
24654 ret
24655 ENDPROC(efi_call2)
24656
24657 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
24658 call *%rdi
24659 addq $32, %rsp
24660 RESTORE_XMM
24661 + pax_force_retaddr 0, 1
24662 ret
24663 ENDPROC(efi_call3)
24664
24665 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
24666 call *%rdi
24667 addq $32, %rsp
24668 RESTORE_XMM
24669 + pax_force_retaddr 0, 1
24670 ret
24671 ENDPROC(efi_call4)
24672
24673 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
24674 call *%rdi
24675 addq $48, %rsp
24676 RESTORE_XMM
24677 + pax_force_retaddr 0, 1
24678 ret
24679 ENDPROC(efi_call5)
24680
24681 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
24682 call *%rdi
24683 addq $48, %rsp
24684 RESTORE_XMM
24685 + pax_force_retaddr 0, 1
24686 ret
24687 ENDPROC(efi_call6)
24688 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24689 index ad4ec1c..686479e 100644
24690 --- a/arch/x86/platform/mrst/mrst.c
24691 +++ b/arch/x86/platform/mrst/mrst.c
24692 @@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
24693 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
24694 int sfi_mrtc_num;
24695
24696 -static void mrst_power_off(void)
24697 +static __noreturn void mrst_power_off(void)
24698 {
24699 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24700 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
24701 + BUG();
24702 }
24703
24704 -static void mrst_reboot(void)
24705 +static __noreturn void mrst_reboot(void)
24706 {
24707 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24708 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
24709 else
24710 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
24711 + BUG();
24712 }
24713
24714 /* parse all the mtimer info to a static mtimer array */
24715 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24716 index f10c0af..3ec1f95 100644
24717 --- a/arch/x86/power/cpu.c
24718 +++ b/arch/x86/power/cpu.c
24719 @@ -131,7 +131,7 @@ static void do_fpu_end(void)
24720 static void fix_processor_context(void)
24721 {
24722 int cpu = smp_processor_id();
24723 - struct tss_struct *t = &per_cpu(init_tss, cpu);
24724 + struct tss_struct *t = init_tss + cpu;
24725
24726 set_tss_desc(cpu, t); /*
24727 * This just modifies memory; should not be
24728 @@ -141,7 +141,9 @@ static void fix_processor_context(void)
24729 */
24730
24731 #ifdef CONFIG_X86_64
24732 + pax_open_kernel();
24733 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24734 + pax_close_kernel();
24735
24736 syscall_init(); /* This sets MSR_*STAR and related */
24737 #endif
24738 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24739 index 5d17950..2253fc9 100644
24740 --- a/arch/x86/vdso/Makefile
24741 +++ b/arch/x86/vdso/Makefile
24742 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24743 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24744 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24745
24746 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24747 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24748 GCOV_PROFILE := n
24749
24750 #
24751 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24752 index 468d591..8e80a0a 100644
24753 --- a/arch/x86/vdso/vdso32-setup.c
24754 +++ b/arch/x86/vdso/vdso32-setup.c
24755 @@ -25,6 +25,7 @@
24756 #include <asm/tlbflush.h>
24757 #include <asm/vdso.h>
24758 #include <asm/proto.h>
24759 +#include <asm/mman.h>
24760
24761 enum {
24762 VDSO_DISABLED = 0,
24763 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24764 void enable_sep_cpu(void)
24765 {
24766 int cpu = get_cpu();
24767 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
24768 + struct tss_struct *tss = init_tss + cpu;
24769
24770 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24771 put_cpu();
24772 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24773 gate_vma.vm_start = FIXADDR_USER_START;
24774 gate_vma.vm_end = FIXADDR_USER_END;
24775 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24776 - gate_vma.vm_page_prot = __P101;
24777 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24778 /*
24779 * Make sure the vDSO gets into every core dump.
24780 * Dumping its contents makes post-mortem fully interpretable later
24781 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24782 if (compat)
24783 addr = VDSO_HIGH_BASE;
24784 else {
24785 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24786 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24787 if (IS_ERR_VALUE(addr)) {
24788 ret = addr;
24789 goto up_fail;
24790 }
24791 }
24792
24793 - current->mm->context.vdso = (void *)addr;
24794 + current->mm->context.vdso = addr;
24795
24796 if (compat_uses_vma || !compat) {
24797 /*
24798 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24799 }
24800
24801 current_thread_info()->sysenter_return =
24802 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24803 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24804
24805 up_fail:
24806 if (ret)
24807 - current->mm->context.vdso = NULL;
24808 + current->mm->context.vdso = 0;
24809
24810 up_write(&mm->mmap_sem);
24811
24812 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24813
24814 const char *arch_vma_name(struct vm_area_struct *vma)
24815 {
24816 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24817 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24818 return "[vdso]";
24819 +
24820 +#ifdef CONFIG_PAX_SEGMEXEC
24821 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24822 + return "[vdso]";
24823 +#endif
24824 +
24825 return NULL;
24826 }
24827
24828 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24829 * Check to see if the corresponding task was created in compat vdso
24830 * mode.
24831 */
24832 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24833 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24834 return &gate_vma;
24835 return NULL;
24836 }
24837 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24838 index 153407c..611cba9 100644
24839 --- a/arch/x86/vdso/vma.c
24840 +++ b/arch/x86/vdso/vma.c
24841 @@ -16,8 +16,6 @@
24842 #include <asm/vdso.h>
24843 #include <asm/page.h>
24844
24845 -unsigned int __read_mostly vdso_enabled = 1;
24846 -
24847 extern char vdso_start[], vdso_end[];
24848 extern unsigned short vdso_sync_cpuid;
24849
24850 @@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24851 * unaligned here as a result of stack start randomization.
24852 */
24853 addr = PAGE_ALIGN(addr);
24854 - addr = align_addr(addr, NULL, ALIGN_VDSO);
24855
24856 return addr;
24857 }
24858 @@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24859 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24860 {
24861 struct mm_struct *mm = current->mm;
24862 - unsigned long addr;
24863 + unsigned long addr = 0;
24864 int ret;
24865
24866 - if (!vdso_enabled)
24867 - return 0;
24868 -
24869 down_write(&mm->mmap_sem);
24870 +
24871 +#ifdef CONFIG_PAX_RANDMMAP
24872 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24873 +#endif
24874 +
24875 addr = vdso_addr(mm->start_stack, vdso_size);
24876 + addr = align_addr(addr, NULL, ALIGN_VDSO);
24877 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24878 if (IS_ERR_VALUE(addr)) {
24879 ret = addr;
24880 goto up_fail;
24881 }
24882
24883 - current->mm->context.vdso = (void *)addr;
24884 + mm->context.vdso = addr;
24885
24886 ret = install_special_mapping(mm, addr, vdso_size,
24887 VM_READ|VM_EXEC|
24888 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24889 VM_ALWAYSDUMP,
24890 vdso_pages);
24891 - if (ret) {
24892 - current->mm->context.vdso = NULL;
24893 - goto up_fail;
24894 - }
24895 +
24896 + if (ret)
24897 + mm->context.vdso = 0;
24898
24899 up_fail:
24900 up_write(&mm->mmap_sem);
24901 return ret;
24902 }
24903 -
24904 -static __init int vdso_setup(char *s)
24905 -{
24906 - vdso_enabled = simple_strtoul(s, NULL, 0);
24907 - return 0;
24908 -}
24909 -__setup("vdso=", vdso_setup);
24910 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24911 index 1f92865..c843b20 100644
24912 --- a/arch/x86/xen/enlighten.c
24913 +++ b/arch/x86/xen/enlighten.c
24914 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24915
24916 struct shared_info xen_dummy_shared_info;
24917
24918 -void *xen_initial_gdt;
24919 -
24920 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24921 __read_mostly int xen_have_vector_callback;
24922 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24923 @@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24924 #endif
24925 };
24926
24927 -static void xen_reboot(int reason)
24928 +static __noreturn void xen_reboot(int reason)
24929 {
24930 struct sched_shutdown r = { .reason = reason };
24931
24932 @@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
24933 BUG();
24934 }
24935
24936 -static void xen_restart(char *msg)
24937 +static __noreturn void xen_restart(char *msg)
24938 {
24939 xen_reboot(SHUTDOWN_reboot);
24940 }
24941
24942 -static void xen_emergency_restart(void)
24943 +static __noreturn void xen_emergency_restart(void)
24944 {
24945 xen_reboot(SHUTDOWN_reboot);
24946 }
24947
24948 -static void xen_machine_halt(void)
24949 +static __noreturn void xen_machine_halt(void)
24950 {
24951 xen_reboot(SHUTDOWN_poweroff);
24952 }
24953 @@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
24954 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24955
24956 /* Work out if we support NX */
24957 - x86_configure_nx();
24958 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24959 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24960 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24961 + unsigned l, h;
24962 +
24963 + __supported_pte_mask |= _PAGE_NX;
24964 + rdmsr(MSR_EFER, l, h);
24965 + l |= EFER_NX;
24966 + wrmsr(MSR_EFER, l, h);
24967 + }
24968 +#endif
24969
24970 xen_setup_features();
24971
24972 @@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
24973
24974 machine_ops = xen_machine_ops;
24975
24976 - /*
24977 - * The only reliable way to retain the initial address of the
24978 - * percpu gdt_page is to remember it here, so we can go and
24979 - * mark it RW later, when the initial percpu area is freed.
24980 - */
24981 - xen_initial_gdt = &per_cpu(gdt_page, 0);
24982 -
24983 xen_smp_init();
24984
24985 #ifdef CONFIG_ACPI_NUMA
24986 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24987 index 87f6673..e2555a6 100644
24988 --- a/arch/x86/xen/mmu.c
24989 +++ b/arch/x86/xen/mmu.c
24990 @@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24991 convert_pfn_mfn(init_level4_pgt);
24992 convert_pfn_mfn(level3_ident_pgt);
24993 convert_pfn_mfn(level3_kernel_pgt);
24994 + convert_pfn_mfn(level3_vmalloc_start_pgt);
24995 + convert_pfn_mfn(level3_vmalloc_end_pgt);
24996 + convert_pfn_mfn(level3_vmemmap_pgt);
24997
24998 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24999 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
25000 @@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
25001 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
25002 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
25003 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
25004 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
25005 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
25006 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
25007 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
25008 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
25009 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
25010 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
25011
25012 @@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
25013 pv_mmu_ops.set_pud = xen_set_pud;
25014 #if PAGETABLE_LEVELS == 4
25015 pv_mmu_ops.set_pgd = xen_set_pgd;
25016 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
25017 #endif
25018
25019 /* This will work as long as patching hasn't happened yet
25020 @@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
25021 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
25022 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
25023 .set_pgd = xen_set_pgd_hyper,
25024 + .set_pgd_batched = xen_set_pgd_hyper,
25025
25026 .alloc_pud = xen_alloc_pmd_init,
25027 .release_pud = xen_release_pmd_init,
25028 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
25029 index 041d4fe..7666b7e 100644
25030 --- a/arch/x86/xen/smp.c
25031 +++ b/arch/x86/xen/smp.c
25032 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
25033 {
25034 BUG_ON(smp_processor_id() != 0);
25035 native_smp_prepare_boot_cpu();
25036 -
25037 - /* We've switched to the "real" per-cpu gdt, so make sure the
25038 - old memory can be recycled */
25039 - make_lowmem_page_readwrite(xen_initial_gdt);
25040 -
25041 xen_filter_cpu_maps();
25042 xen_setup_vcpu_info_placement();
25043 }
25044 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
25045 gdt = get_cpu_gdt_table(cpu);
25046
25047 ctxt->flags = VGCF_IN_KERNEL;
25048 - ctxt->user_regs.ds = __USER_DS;
25049 - ctxt->user_regs.es = __USER_DS;
25050 + ctxt->user_regs.ds = __KERNEL_DS;
25051 + ctxt->user_regs.es = __KERNEL_DS;
25052 ctxt->user_regs.ss = __KERNEL_DS;
25053 #ifdef CONFIG_X86_32
25054 ctxt->user_regs.fs = __KERNEL_PERCPU;
25055 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
25056 + savesegment(gs, ctxt->user_regs.gs);
25057 #else
25058 ctxt->gs_base_kernel = per_cpu_offset(cpu);
25059 #endif
25060 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
25061 int rc;
25062
25063 per_cpu(current_task, cpu) = idle;
25064 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
25065 #ifdef CONFIG_X86_32
25066 irq_ctx_init(cpu);
25067 #else
25068 clear_tsk_thread_flag(idle, TIF_FORK);
25069 - per_cpu(kernel_stack, cpu) =
25070 - (unsigned long)task_stack_page(idle) -
25071 - KERNEL_STACK_OFFSET + THREAD_SIZE;
25072 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
25073 #endif
25074 xen_setup_runstate_info(cpu);
25075 xen_setup_timer(cpu);
25076 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
25077 index b040b0e..8cc4fe0 100644
25078 --- a/arch/x86/xen/xen-asm_32.S
25079 +++ b/arch/x86/xen/xen-asm_32.S
25080 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
25081 ESP_OFFSET=4 # bytes pushed onto stack
25082
25083 /*
25084 - * Store vcpu_info pointer for easy access. Do it this way to
25085 - * avoid having to reload %fs
25086 + * Store vcpu_info pointer for easy access.
25087 */
25088 #ifdef CONFIG_SMP
25089 - GET_THREAD_INFO(%eax)
25090 - movl TI_cpu(%eax), %eax
25091 - movl __per_cpu_offset(,%eax,4), %eax
25092 - mov xen_vcpu(%eax), %eax
25093 + push %fs
25094 + mov $(__KERNEL_PERCPU), %eax
25095 + mov %eax, %fs
25096 + mov PER_CPU_VAR(xen_vcpu), %eax
25097 + pop %fs
25098 #else
25099 movl xen_vcpu, %eax
25100 #endif
25101 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
25102 index aaa7291..3f77960 100644
25103 --- a/arch/x86/xen/xen-head.S
25104 +++ b/arch/x86/xen/xen-head.S
25105 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
25106 #ifdef CONFIG_X86_32
25107 mov %esi,xen_start_info
25108 mov $init_thread_union+THREAD_SIZE,%esp
25109 +#ifdef CONFIG_SMP
25110 + movl $cpu_gdt_table,%edi
25111 + movl $__per_cpu_load,%eax
25112 + movw %ax,__KERNEL_PERCPU + 2(%edi)
25113 + rorl $16,%eax
25114 + movb %al,__KERNEL_PERCPU + 4(%edi)
25115 + movb %ah,__KERNEL_PERCPU + 7(%edi)
25116 + movl $__per_cpu_end - 1,%eax
25117 + subl $__per_cpu_start,%eax
25118 + movw %ax,__KERNEL_PERCPU + 0(%edi)
25119 +#endif
25120 #else
25121 mov %rsi,xen_start_info
25122 mov $init_thread_union+THREAD_SIZE,%rsp
25123 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
25124 index b095739..8c17bcd 100644
25125 --- a/arch/x86/xen/xen-ops.h
25126 +++ b/arch/x86/xen/xen-ops.h
25127 @@ -10,8 +10,6 @@
25128 extern const char xen_hypervisor_callback[];
25129 extern const char xen_failsafe_callback[];
25130
25131 -extern void *xen_initial_gdt;
25132 -
25133 struct trap_info;
25134 void xen_copy_trap_info(struct trap_info *traps);
25135
25136 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
25137 index 58916af..9cb880b 100644
25138 --- a/block/blk-iopoll.c
25139 +++ b/block/blk-iopoll.c
25140 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
25141 }
25142 EXPORT_SYMBOL(blk_iopoll_complete);
25143
25144 -static void blk_iopoll_softirq(struct softirq_action *h)
25145 +static void blk_iopoll_softirq(void)
25146 {
25147 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
25148 int rearm = 0, budget = blk_iopoll_budget;
25149 diff --git a/block/blk-map.c b/block/blk-map.c
25150 index 623e1cd..ca1e109 100644
25151 --- a/block/blk-map.c
25152 +++ b/block/blk-map.c
25153 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
25154 if (!len || !kbuf)
25155 return -EINVAL;
25156
25157 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
25158 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
25159 if (do_copy)
25160 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
25161 else
25162 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
25163 index 1366a89..e17f54b 100644
25164 --- a/block/blk-softirq.c
25165 +++ b/block/blk-softirq.c
25166 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
25167 * Softirq action handler - move entries to local list and loop over them
25168 * while passing them to the queue registered handler.
25169 */
25170 -static void blk_done_softirq(struct softirq_action *h)
25171 +static void blk_done_softirq(void)
25172 {
25173 struct list_head *cpu_list, local_list;
25174
25175 diff --git a/block/bsg.c b/block/bsg.c
25176 index 702f131..37808bf 100644
25177 --- a/block/bsg.c
25178 +++ b/block/bsg.c
25179 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
25180 struct sg_io_v4 *hdr, struct bsg_device *bd,
25181 fmode_t has_write_perm)
25182 {
25183 + unsigned char tmpcmd[sizeof(rq->__cmd)];
25184 + unsigned char *cmdptr;
25185 +
25186 if (hdr->request_len > BLK_MAX_CDB) {
25187 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
25188 if (!rq->cmd)
25189 return -ENOMEM;
25190 - }
25191 + cmdptr = rq->cmd;
25192 + } else
25193 + cmdptr = tmpcmd;
25194
25195 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
25196 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
25197 hdr->request_len))
25198 return -EFAULT;
25199
25200 + if (cmdptr != rq->cmd)
25201 + memcpy(rq->cmd, cmdptr, hdr->request_len);
25202 +
25203 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
25204 if (blk_verify_command(rq->cmd, has_write_perm))
25205 return -EPERM;
25206 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
25207 index 7b72502..646105c 100644
25208 --- a/block/compat_ioctl.c
25209 +++ b/block/compat_ioctl.c
25210 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
25211 err |= __get_user(f->spec1, &uf->spec1);
25212 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
25213 err |= __get_user(name, &uf->name);
25214 - f->name = compat_ptr(name);
25215 + f->name = (void __force_kernel *)compat_ptr(name);
25216 if (err) {
25217 err = -EFAULT;
25218 goto out;
25219 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
25220 index 688be8a..8a37d98 100644
25221 --- a/block/scsi_ioctl.c
25222 +++ b/block/scsi_ioctl.c
25223 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
25224 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
25225 struct sg_io_hdr *hdr, fmode_t mode)
25226 {
25227 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
25228 + unsigned char tmpcmd[sizeof(rq->__cmd)];
25229 + unsigned char *cmdptr;
25230 +
25231 + if (rq->cmd != rq->__cmd)
25232 + cmdptr = rq->cmd;
25233 + else
25234 + cmdptr = tmpcmd;
25235 +
25236 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
25237 return -EFAULT;
25238 +
25239 + if (cmdptr != rq->cmd)
25240 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
25241 +
25242 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
25243 return -EPERM;
25244
25245 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
25246 int err;
25247 unsigned int in_len, out_len, bytes, opcode, cmdlen;
25248 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
25249 + unsigned char tmpcmd[sizeof(rq->__cmd)];
25250 + unsigned char *cmdptr;
25251
25252 if (!sic)
25253 return -EINVAL;
25254 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
25255 */
25256 err = -EFAULT;
25257 rq->cmd_len = cmdlen;
25258 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
25259 +
25260 + if (rq->cmd != rq->__cmd)
25261 + cmdptr = rq->cmd;
25262 + else
25263 + cmdptr = tmpcmd;
25264 +
25265 + if (copy_from_user(cmdptr, sic->data, cmdlen))
25266 goto error;
25267
25268 + if (rq->cmd != cmdptr)
25269 + memcpy(rq->cmd, cmdptr, cmdlen);
25270 +
25271 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
25272 goto error;
25273
25274 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
25275 index 671d4d6..5f24030 100644
25276 --- a/crypto/cryptd.c
25277 +++ b/crypto/cryptd.c
25278 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
25279
25280 struct cryptd_blkcipher_request_ctx {
25281 crypto_completion_t complete;
25282 -};
25283 +} __no_const;
25284
25285 struct cryptd_hash_ctx {
25286 struct crypto_shash *child;
25287 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
25288
25289 struct cryptd_aead_request_ctx {
25290 crypto_completion_t complete;
25291 -};
25292 +} __no_const;
25293
25294 static void cryptd_queue_worker(struct work_struct *work);
25295
25296 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
25297 index 5d41894..22021e4 100644
25298 --- a/drivers/acpi/apei/cper.c
25299 +++ b/drivers/acpi/apei/cper.c
25300 @@ -38,12 +38,12 @@
25301 */
25302 u64 cper_next_record_id(void)
25303 {
25304 - static atomic64_t seq;
25305 + static atomic64_unchecked_t seq;
25306
25307 - if (!atomic64_read(&seq))
25308 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
25309 + if (!atomic64_read_unchecked(&seq))
25310 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
25311
25312 - return atomic64_inc_return(&seq);
25313 + return atomic64_inc_return_unchecked(&seq);
25314 }
25315 EXPORT_SYMBOL_GPL(cper_next_record_id);
25316
25317 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
25318 index 6c47ae9..abfdd63 100644
25319 --- a/drivers/acpi/ec_sys.c
25320 +++ b/drivers/acpi/ec_sys.c
25321 @@ -12,6 +12,7 @@
25322 #include <linux/acpi.h>
25323 #include <linux/debugfs.h>
25324 #include <linux/module.h>
25325 +#include <linux/uaccess.h>
25326 #include "internal.h"
25327
25328 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
25329 @@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25330 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25331 */
25332 unsigned int size = EC_SPACE_SIZE;
25333 - u8 *data = (u8 *) buf;
25334 + u8 data;
25335 loff_t init_off = *off;
25336 int err = 0;
25337
25338 @@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25339 size = count;
25340
25341 while (size) {
25342 - err = ec_read(*off, &data[*off - init_off]);
25343 + err = ec_read(*off, &data);
25344 if (err)
25345 return err;
25346 + if (put_user(data, &buf[*off - init_off]))
25347 + return -EFAULT;
25348 *off += 1;
25349 size--;
25350 }
25351 @@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25352
25353 unsigned int size = count;
25354 loff_t init_off = *off;
25355 - u8 *data = (u8 *) buf;
25356 int err = 0;
25357
25358 if (*off >= EC_SPACE_SIZE)
25359 @@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25360 }
25361
25362 while (size) {
25363 - u8 byte_write = data[*off - init_off];
25364 + u8 byte_write;
25365 + if (get_user(byte_write, &buf[*off - init_off]))
25366 + return -EFAULT;
25367 err = ec_write(*off, byte_write);
25368 if (err)
25369 return err;
25370 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25371 index 251c7b62..000462d 100644
25372 --- a/drivers/acpi/proc.c
25373 +++ b/drivers/acpi/proc.c
25374 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
25375 size_t count, loff_t * ppos)
25376 {
25377 struct list_head *node, *next;
25378 - char strbuf[5];
25379 - char str[5] = "";
25380 - unsigned int len = count;
25381 + char strbuf[5] = {0};
25382
25383 - if (len > 4)
25384 - len = 4;
25385 - if (len < 0)
25386 + if (count > 4)
25387 + count = 4;
25388 + if (copy_from_user(strbuf, buffer, count))
25389 return -EFAULT;
25390 -
25391 - if (copy_from_user(strbuf, buffer, len))
25392 - return -EFAULT;
25393 - strbuf[len] = '\0';
25394 - sscanf(strbuf, "%s", str);
25395 + strbuf[count] = '\0';
25396
25397 mutex_lock(&acpi_device_lock);
25398 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25399 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
25400 if (!dev->wakeup.flags.valid)
25401 continue;
25402
25403 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
25404 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25405 if (device_can_wakeup(&dev->dev)) {
25406 bool enable = !device_may_wakeup(&dev->dev);
25407 device_set_wakeup_enable(&dev->dev, enable);
25408 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25409 index 9d7bc9f..a6fc091 100644
25410 --- a/drivers/acpi/processor_driver.c
25411 +++ b/drivers/acpi/processor_driver.c
25412 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25413 return 0;
25414 #endif
25415
25416 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25417 + BUG_ON(pr->id >= nr_cpu_ids);
25418
25419 /*
25420 * Buggy BIOS check
25421 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25422 index c04ad68..0b99473 100644
25423 --- a/drivers/ata/libata-core.c
25424 +++ b/drivers/ata/libata-core.c
25425 @@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25426 struct ata_port *ap;
25427 unsigned int tag;
25428
25429 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25430 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25431 ap = qc->ap;
25432
25433 qc->flags = 0;
25434 @@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25435 struct ata_port *ap;
25436 struct ata_link *link;
25437
25438 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25439 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25440 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25441 ap = qc->ap;
25442 link = qc->dev->link;
25443 @@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25444 return;
25445
25446 spin_lock(&lock);
25447 + pax_open_kernel();
25448
25449 for (cur = ops->inherits; cur; cur = cur->inherits) {
25450 void **inherit = (void **)cur;
25451 @@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25452 if (IS_ERR(*pp))
25453 *pp = NULL;
25454
25455 - ops->inherits = NULL;
25456 + *(struct ata_port_operations **)&ops->inherits = NULL;
25457
25458 + pax_close_kernel();
25459 spin_unlock(&lock);
25460 }
25461
25462 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25463 index e8574bb..f9f6a72 100644
25464 --- a/drivers/ata/pata_arasan_cf.c
25465 +++ b/drivers/ata/pata_arasan_cf.c
25466 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25467 /* Handle platform specific quirks */
25468 if (pdata->quirk) {
25469 if (pdata->quirk & CF_BROKEN_PIO) {
25470 - ap->ops->set_piomode = NULL;
25471 + pax_open_kernel();
25472 + *(void **)&ap->ops->set_piomode = NULL;
25473 + pax_close_kernel();
25474 ap->pio_mask = 0;
25475 }
25476 if (pdata->quirk & CF_BROKEN_MWDMA)
25477 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25478 index f9b983a..887b9d8 100644
25479 --- a/drivers/atm/adummy.c
25480 +++ b/drivers/atm/adummy.c
25481 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25482 vcc->pop(vcc, skb);
25483 else
25484 dev_kfree_skb_any(skb);
25485 - atomic_inc(&vcc->stats->tx);
25486 + atomic_inc_unchecked(&vcc->stats->tx);
25487
25488 return 0;
25489 }
25490 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25491 index f8f41e0..1f987dd 100644
25492 --- a/drivers/atm/ambassador.c
25493 +++ b/drivers/atm/ambassador.c
25494 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25495 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25496
25497 // VC layer stats
25498 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25499 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25500
25501 // free the descriptor
25502 kfree (tx_descr);
25503 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25504 dump_skb ("<<<", vc, skb);
25505
25506 // VC layer stats
25507 - atomic_inc(&atm_vcc->stats->rx);
25508 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25509 __net_timestamp(skb);
25510 // end of our responsibility
25511 atm_vcc->push (atm_vcc, skb);
25512 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25513 } else {
25514 PRINTK (KERN_INFO, "dropped over-size frame");
25515 // should we count this?
25516 - atomic_inc(&atm_vcc->stats->rx_drop);
25517 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25518 }
25519
25520 } else {
25521 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25522 }
25523
25524 if (check_area (skb->data, skb->len)) {
25525 - atomic_inc(&atm_vcc->stats->tx_err);
25526 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25527 return -ENOMEM; // ?
25528 }
25529
25530 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25531 index b22d71c..d6e1049 100644
25532 --- a/drivers/atm/atmtcp.c
25533 +++ b/drivers/atm/atmtcp.c
25534 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25535 if (vcc->pop) vcc->pop(vcc,skb);
25536 else dev_kfree_skb(skb);
25537 if (dev_data) return 0;
25538 - atomic_inc(&vcc->stats->tx_err);
25539 + atomic_inc_unchecked(&vcc->stats->tx_err);
25540 return -ENOLINK;
25541 }
25542 size = skb->len+sizeof(struct atmtcp_hdr);
25543 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25544 if (!new_skb) {
25545 if (vcc->pop) vcc->pop(vcc,skb);
25546 else dev_kfree_skb(skb);
25547 - atomic_inc(&vcc->stats->tx_err);
25548 + atomic_inc_unchecked(&vcc->stats->tx_err);
25549 return -ENOBUFS;
25550 }
25551 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25552 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25553 if (vcc->pop) vcc->pop(vcc,skb);
25554 else dev_kfree_skb(skb);
25555 out_vcc->push(out_vcc,new_skb);
25556 - atomic_inc(&vcc->stats->tx);
25557 - atomic_inc(&out_vcc->stats->rx);
25558 + atomic_inc_unchecked(&vcc->stats->tx);
25559 + atomic_inc_unchecked(&out_vcc->stats->rx);
25560 return 0;
25561 }
25562
25563 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25564 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25565 read_unlock(&vcc_sklist_lock);
25566 if (!out_vcc) {
25567 - atomic_inc(&vcc->stats->tx_err);
25568 + atomic_inc_unchecked(&vcc->stats->tx_err);
25569 goto done;
25570 }
25571 skb_pull(skb,sizeof(struct atmtcp_hdr));
25572 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25573 __net_timestamp(new_skb);
25574 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25575 out_vcc->push(out_vcc,new_skb);
25576 - atomic_inc(&vcc->stats->tx);
25577 - atomic_inc(&out_vcc->stats->rx);
25578 + atomic_inc_unchecked(&vcc->stats->tx);
25579 + atomic_inc_unchecked(&out_vcc->stats->rx);
25580 done:
25581 if (vcc->pop) vcc->pop(vcc,skb);
25582 else dev_kfree_skb(skb);
25583 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25584 index 956e9ac..133516d 100644
25585 --- a/drivers/atm/eni.c
25586 +++ b/drivers/atm/eni.c
25587 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25588 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25589 vcc->dev->number);
25590 length = 0;
25591 - atomic_inc(&vcc->stats->rx_err);
25592 + atomic_inc_unchecked(&vcc->stats->rx_err);
25593 }
25594 else {
25595 length = ATM_CELL_SIZE-1; /* no HEC */
25596 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25597 size);
25598 }
25599 eff = length = 0;
25600 - atomic_inc(&vcc->stats->rx_err);
25601 + atomic_inc_unchecked(&vcc->stats->rx_err);
25602 }
25603 else {
25604 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25605 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25606 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25607 vcc->dev->number,vcc->vci,length,size << 2,descr);
25608 length = eff = 0;
25609 - atomic_inc(&vcc->stats->rx_err);
25610 + atomic_inc_unchecked(&vcc->stats->rx_err);
25611 }
25612 }
25613 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25614 @@ -771,7 +771,7 @@ rx_dequeued++;
25615 vcc->push(vcc,skb);
25616 pushed++;
25617 }
25618 - atomic_inc(&vcc->stats->rx);
25619 + atomic_inc_unchecked(&vcc->stats->rx);
25620 }
25621 wake_up(&eni_dev->rx_wait);
25622 }
25623 @@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
25624 PCI_DMA_TODEVICE);
25625 if (vcc->pop) vcc->pop(vcc,skb);
25626 else dev_kfree_skb_irq(skb);
25627 - atomic_inc(&vcc->stats->tx);
25628 + atomic_inc_unchecked(&vcc->stats->tx);
25629 wake_up(&eni_dev->tx_wait);
25630 dma_complete++;
25631 }
25632 @@ -1569,7 +1569,7 @@ tx_complete++;
25633 /*--------------------------------- entries ---------------------------------*/
25634
25635
25636 -static const char *media_name[] __devinitdata = {
25637 +static const char *media_name[] __devinitconst = {
25638 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25639 "UTP", "05?", "06?", "07?", /* 4- 7 */
25640 "TAXI","09?", "10?", "11?", /* 8-11 */
25641 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25642 index 5072f8a..fa52520d 100644
25643 --- a/drivers/atm/firestream.c
25644 +++ b/drivers/atm/firestream.c
25645 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25646 }
25647 }
25648
25649 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25650 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25651
25652 fs_dprintk (FS_DEBUG_TXMEM, "i");
25653 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25654 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25655 #endif
25656 skb_put (skb, qe->p1 & 0xffff);
25657 ATM_SKB(skb)->vcc = atm_vcc;
25658 - atomic_inc(&atm_vcc->stats->rx);
25659 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25660 __net_timestamp(skb);
25661 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25662 atm_vcc->push (atm_vcc, skb);
25663 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25664 kfree (pe);
25665 }
25666 if (atm_vcc)
25667 - atomic_inc(&atm_vcc->stats->rx_drop);
25668 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25669 break;
25670 case 0x1f: /* Reassembly abort: no buffers. */
25671 /* Silently increment error counter. */
25672 if (atm_vcc)
25673 - atomic_inc(&atm_vcc->stats->rx_drop);
25674 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25675 break;
25676 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25677 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25678 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25679 index 361f5ae..7fc552d 100644
25680 --- a/drivers/atm/fore200e.c
25681 +++ b/drivers/atm/fore200e.c
25682 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25683 #endif
25684 /* check error condition */
25685 if (*entry->status & STATUS_ERROR)
25686 - atomic_inc(&vcc->stats->tx_err);
25687 + atomic_inc_unchecked(&vcc->stats->tx_err);
25688 else
25689 - atomic_inc(&vcc->stats->tx);
25690 + atomic_inc_unchecked(&vcc->stats->tx);
25691 }
25692 }
25693
25694 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25695 if (skb == NULL) {
25696 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25697
25698 - atomic_inc(&vcc->stats->rx_drop);
25699 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25700 return -ENOMEM;
25701 }
25702
25703 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25704
25705 dev_kfree_skb_any(skb);
25706
25707 - atomic_inc(&vcc->stats->rx_drop);
25708 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25709 return -ENOMEM;
25710 }
25711
25712 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25713
25714 vcc->push(vcc, skb);
25715 - atomic_inc(&vcc->stats->rx);
25716 + atomic_inc_unchecked(&vcc->stats->rx);
25717
25718 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25719
25720 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25721 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25722 fore200e->atm_dev->number,
25723 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25724 - atomic_inc(&vcc->stats->rx_err);
25725 + atomic_inc_unchecked(&vcc->stats->rx_err);
25726 }
25727 }
25728
25729 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25730 goto retry_here;
25731 }
25732
25733 - atomic_inc(&vcc->stats->tx_err);
25734 + atomic_inc_unchecked(&vcc->stats->tx_err);
25735
25736 fore200e->tx_sat++;
25737 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25738 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25739 index 9a51df4..f3bb5f8 100644
25740 --- a/drivers/atm/he.c
25741 +++ b/drivers/atm/he.c
25742 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25743
25744 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25745 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25746 - atomic_inc(&vcc->stats->rx_drop);
25747 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25748 goto return_host_buffers;
25749 }
25750
25751 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25752 RBRQ_LEN_ERR(he_dev->rbrq_head)
25753 ? "LEN_ERR" : "",
25754 vcc->vpi, vcc->vci);
25755 - atomic_inc(&vcc->stats->rx_err);
25756 + atomic_inc_unchecked(&vcc->stats->rx_err);
25757 goto return_host_buffers;
25758 }
25759
25760 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25761 vcc->push(vcc, skb);
25762 spin_lock(&he_dev->global_lock);
25763
25764 - atomic_inc(&vcc->stats->rx);
25765 + atomic_inc_unchecked(&vcc->stats->rx);
25766
25767 return_host_buffers:
25768 ++pdus_assembled;
25769 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25770 tpd->vcc->pop(tpd->vcc, tpd->skb);
25771 else
25772 dev_kfree_skb_any(tpd->skb);
25773 - atomic_inc(&tpd->vcc->stats->tx_err);
25774 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25775 }
25776 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25777 return;
25778 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25779 vcc->pop(vcc, skb);
25780 else
25781 dev_kfree_skb_any(skb);
25782 - atomic_inc(&vcc->stats->tx_err);
25783 + atomic_inc_unchecked(&vcc->stats->tx_err);
25784 return -EINVAL;
25785 }
25786
25787 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25788 vcc->pop(vcc, skb);
25789 else
25790 dev_kfree_skb_any(skb);
25791 - atomic_inc(&vcc->stats->tx_err);
25792 + atomic_inc_unchecked(&vcc->stats->tx_err);
25793 return -EINVAL;
25794 }
25795 #endif
25796 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25797 vcc->pop(vcc, skb);
25798 else
25799 dev_kfree_skb_any(skb);
25800 - atomic_inc(&vcc->stats->tx_err);
25801 + atomic_inc_unchecked(&vcc->stats->tx_err);
25802 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25803 return -ENOMEM;
25804 }
25805 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25806 vcc->pop(vcc, skb);
25807 else
25808 dev_kfree_skb_any(skb);
25809 - atomic_inc(&vcc->stats->tx_err);
25810 + atomic_inc_unchecked(&vcc->stats->tx_err);
25811 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25812 return -ENOMEM;
25813 }
25814 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25815 __enqueue_tpd(he_dev, tpd, cid);
25816 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25817
25818 - atomic_inc(&vcc->stats->tx);
25819 + atomic_inc_unchecked(&vcc->stats->tx);
25820
25821 return 0;
25822 }
25823 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25824 index b812103..e391a49 100644
25825 --- a/drivers/atm/horizon.c
25826 +++ b/drivers/atm/horizon.c
25827 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25828 {
25829 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25830 // VC layer stats
25831 - atomic_inc(&vcc->stats->rx);
25832 + atomic_inc_unchecked(&vcc->stats->rx);
25833 __net_timestamp(skb);
25834 // end of our responsibility
25835 vcc->push (vcc, skb);
25836 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25837 dev->tx_iovec = NULL;
25838
25839 // VC layer stats
25840 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25841 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25842
25843 // free the skb
25844 hrz_kfree_skb (skb);
25845 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25846 index 1c05212..c28e200 100644
25847 --- a/drivers/atm/idt77252.c
25848 +++ b/drivers/atm/idt77252.c
25849 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25850 else
25851 dev_kfree_skb(skb);
25852
25853 - atomic_inc(&vcc->stats->tx);
25854 + atomic_inc_unchecked(&vcc->stats->tx);
25855 }
25856
25857 atomic_dec(&scq->used);
25858 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25859 if ((sb = dev_alloc_skb(64)) == NULL) {
25860 printk("%s: Can't allocate buffers for aal0.\n",
25861 card->name);
25862 - atomic_add(i, &vcc->stats->rx_drop);
25863 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25864 break;
25865 }
25866 if (!atm_charge(vcc, sb->truesize)) {
25867 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25868 card->name);
25869 - atomic_add(i - 1, &vcc->stats->rx_drop);
25870 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25871 dev_kfree_skb(sb);
25872 break;
25873 }
25874 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25875 ATM_SKB(sb)->vcc = vcc;
25876 __net_timestamp(sb);
25877 vcc->push(vcc, sb);
25878 - atomic_inc(&vcc->stats->rx);
25879 + atomic_inc_unchecked(&vcc->stats->rx);
25880
25881 cell += ATM_CELL_PAYLOAD;
25882 }
25883 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25884 "(CDC: %08x)\n",
25885 card->name, len, rpp->len, readl(SAR_REG_CDC));
25886 recycle_rx_pool_skb(card, rpp);
25887 - atomic_inc(&vcc->stats->rx_err);
25888 + atomic_inc_unchecked(&vcc->stats->rx_err);
25889 return;
25890 }
25891 if (stat & SAR_RSQE_CRC) {
25892 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25893 recycle_rx_pool_skb(card, rpp);
25894 - atomic_inc(&vcc->stats->rx_err);
25895 + atomic_inc_unchecked(&vcc->stats->rx_err);
25896 return;
25897 }
25898 if (skb_queue_len(&rpp->queue) > 1) {
25899 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25900 RXPRINTK("%s: Can't alloc RX skb.\n",
25901 card->name);
25902 recycle_rx_pool_skb(card, rpp);
25903 - atomic_inc(&vcc->stats->rx_err);
25904 + atomic_inc_unchecked(&vcc->stats->rx_err);
25905 return;
25906 }
25907 if (!atm_charge(vcc, skb->truesize)) {
25908 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25909 __net_timestamp(skb);
25910
25911 vcc->push(vcc, skb);
25912 - atomic_inc(&vcc->stats->rx);
25913 + atomic_inc_unchecked(&vcc->stats->rx);
25914
25915 return;
25916 }
25917 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25918 __net_timestamp(skb);
25919
25920 vcc->push(vcc, skb);
25921 - atomic_inc(&vcc->stats->rx);
25922 + atomic_inc_unchecked(&vcc->stats->rx);
25923
25924 if (skb->truesize > SAR_FB_SIZE_3)
25925 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25926 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25927 if (vcc->qos.aal != ATM_AAL0) {
25928 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25929 card->name, vpi, vci);
25930 - atomic_inc(&vcc->stats->rx_drop);
25931 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25932 goto drop;
25933 }
25934
25935 if ((sb = dev_alloc_skb(64)) == NULL) {
25936 printk("%s: Can't allocate buffers for AAL0.\n",
25937 card->name);
25938 - atomic_inc(&vcc->stats->rx_err);
25939 + atomic_inc_unchecked(&vcc->stats->rx_err);
25940 goto drop;
25941 }
25942
25943 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25944 ATM_SKB(sb)->vcc = vcc;
25945 __net_timestamp(sb);
25946 vcc->push(vcc, sb);
25947 - atomic_inc(&vcc->stats->rx);
25948 + atomic_inc_unchecked(&vcc->stats->rx);
25949
25950 drop:
25951 skb_pull(queue, 64);
25952 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25953
25954 if (vc == NULL) {
25955 printk("%s: NULL connection in send().\n", card->name);
25956 - atomic_inc(&vcc->stats->tx_err);
25957 + atomic_inc_unchecked(&vcc->stats->tx_err);
25958 dev_kfree_skb(skb);
25959 return -EINVAL;
25960 }
25961 if (!test_bit(VCF_TX, &vc->flags)) {
25962 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25963 - atomic_inc(&vcc->stats->tx_err);
25964 + atomic_inc_unchecked(&vcc->stats->tx_err);
25965 dev_kfree_skb(skb);
25966 return -EINVAL;
25967 }
25968 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25969 break;
25970 default:
25971 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25972 - atomic_inc(&vcc->stats->tx_err);
25973 + atomic_inc_unchecked(&vcc->stats->tx_err);
25974 dev_kfree_skb(skb);
25975 return -EINVAL;
25976 }
25977
25978 if (skb_shinfo(skb)->nr_frags != 0) {
25979 printk("%s: No scatter-gather yet.\n", card->name);
25980 - atomic_inc(&vcc->stats->tx_err);
25981 + atomic_inc_unchecked(&vcc->stats->tx_err);
25982 dev_kfree_skb(skb);
25983 return -EINVAL;
25984 }
25985 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25986
25987 err = queue_skb(card, vc, skb, oam);
25988 if (err) {
25989 - atomic_inc(&vcc->stats->tx_err);
25990 + atomic_inc_unchecked(&vcc->stats->tx_err);
25991 dev_kfree_skb(skb);
25992 return err;
25993 }
25994 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25995 skb = dev_alloc_skb(64);
25996 if (!skb) {
25997 printk("%s: Out of memory in send_oam().\n", card->name);
25998 - atomic_inc(&vcc->stats->tx_err);
25999 + atomic_inc_unchecked(&vcc->stats->tx_err);
26000 return -ENOMEM;
26001 }
26002 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26003 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
26004 index 3d0c2b0..45441fa 100644
26005 --- a/drivers/atm/iphase.c
26006 +++ b/drivers/atm/iphase.c
26007 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26008 status = (u_short) (buf_desc_ptr->desc_mode);
26009 if (status & (RX_CER | RX_PTE | RX_OFL))
26010 {
26011 - atomic_inc(&vcc->stats->rx_err);
26012 + atomic_inc_unchecked(&vcc->stats->rx_err);
26013 IF_ERR(printk("IA: bad packet, dropping it");)
26014 if (status & RX_CER) {
26015 IF_ERR(printk(" cause: packet CRC error\n");)
26016 @@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
26017 len = dma_addr - buf_addr;
26018 if (len > iadev->rx_buf_sz) {
26019 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26020 - atomic_inc(&vcc->stats->rx_err);
26021 + atomic_inc_unchecked(&vcc->stats->rx_err);
26022 goto out_free_desc;
26023 }
26024
26025 @@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
26026 ia_vcc = INPH_IA_VCC(vcc);
26027 if (ia_vcc == NULL)
26028 {
26029 - atomic_inc(&vcc->stats->rx_err);
26030 + atomic_inc_unchecked(&vcc->stats->rx_err);
26031 dev_kfree_skb_any(skb);
26032 atm_return(vcc, atm_guess_pdu2truesize(len));
26033 goto INCR_DLE;
26034 @@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
26035 if ((length > iadev->rx_buf_sz) || (length >
26036 (skb->len - sizeof(struct cpcs_trailer))))
26037 {
26038 - atomic_inc(&vcc->stats->rx_err);
26039 + atomic_inc_unchecked(&vcc->stats->rx_err);
26040 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26041 length, skb->len);)
26042 dev_kfree_skb_any(skb);
26043 @@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
26044
26045 IF_RX(printk("rx_dle_intr: skb push");)
26046 vcc->push(vcc,skb);
26047 - atomic_inc(&vcc->stats->rx);
26048 + atomic_inc_unchecked(&vcc->stats->rx);
26049 iadev->rx_pkt_cnt++;
26050 }
26051 INCR_DLE:
26052 @@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
26053 {
26054 struct k_sonet_stats *stats;
26055 stats = &PRIV(_ia_dev[board])->sonet_stats;
26056 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26057 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26058 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26059 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26060 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26061 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26062 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26063 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26064 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26065 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26066 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26067 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26068 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26069 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26070 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26071 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26072 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26073 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26074 }
26075 ia_cmds.status = 0;
26076 break;
26077 @@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
26078 if ((desc == 0) || (desc > iadev->num_tx_desc))
26079 {
26080 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26081 - atomic_inc(&vcc->stats->tx);
26082 + atomic_inc_unchecked(&vcc->stats->tx);
26083 if (vcc->pop)
26084 vcc->pop(vcc, skb);
26085 else
26086 @@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
26087 ATM_DESC(skb) = vcc->vci;
26088 skb_queue_tail(&iadev->tx_dma_q, skb);
26089
26090 - atomic_inc(&vcc->stats->tx);
26091 + atomic_inc_unchecked(&vcc->stats->tx);
26092 iadev->tx_pkt_cnt++;
26093 /* Increment transaction counter */
26094 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26095
26096 #if 0
26097 /* add flow control logic */
26098 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26099 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26100 if (iavcc->vc_desc_cnt > 10) {
26101 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26102 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26103 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
26104 index f556969..0da15eb 100644
26105 --- a/drivers/atm/lanai.c
26106 +++ b/drivers/atm/lanai.c
26107 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
26108 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26109 lanai_endtx(lanai, lvcc);
26110 lanai_free_skb(lvcc->tx.atmvcc, skb);
26111 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26112 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26113 }
26114
26115 /* Try to fill the buffer - don't call unless there is backlog */
26116 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
26117 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26118 __net_timestamp(skb);
26119 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26120 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26121 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26122 out:
26123 lvcc->rx.buf.ptr = end;
26124 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26125 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26126 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26127 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26128 lanai->stats.service_rxnotaal5++;
26129 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26130 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26131 return 0;
26132 }
26133 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26134 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26135 int bytes;
26136 read_unlock(&vcc_sklist_lock);
26137 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26138 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26139 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26140 lvcc->stats.x.aal5.service_trash++;
26141 bytes = (SERVICE_GET_END(s) * 16) -
26142 (((unsigned long) lvcc->rx.buf.ptr) -
26143 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26144 }
26145 if (s & SERVICE_STREAM) {
26146 read_unlock(&vcc_sklist_lock);
26147 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26148 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26149 lvcc->stats.x.aal5.service_stream++;
26150 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26151 "PDU on VCI %d!\n", lanai->number, vci);
26152 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26153 return 0;
26154 }
26155 DPRINTK("got rx crc error on vci %d\n", vci);
26156 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26157 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26158 lvcc->stats.x.aal5.service_rxcrc++;
26159 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26160 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26161 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
26162 index 1c70c45..300718d 100644
26163 --- a/drivers/atm/nicstar.c
26164 +++ b/drivers/atm/nicstar.c
26165 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26166 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
26167 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
26168 card->index);
26169 - atomic_inc(&vcc->stats->tx_err);
26170 + atomic_inc_unchecked(&vcc->stats->tx_err);
26171 dev_kfree_skb_any(skb);
26172 return -EINVAL;
26173 }
26174 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26175 if (!vc->tx) {
26176 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
26177 card->index);
26178 - atomic_inc(&vcc->stats->tx_err);
26179 + atomic_inc_unchecked(&vcc->stats->tx_err);
26180 dev_kfree_skb_any(skb);
26181 return -EINVAL;
26182 }
26183 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26184 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
26185 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
26186 card->index);
26187 - atomic_inc(&vcc->stats->tx_err);
26188 + atomic_inc_unchecked(&vcc->stats->tx_err);
26189 dev_kfree_skb_any(skb);
26190 return -EINVAL;
26191 }
26192
26193 if (skb_shinfo(skb)->nr_frags != 0) {
26194 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26195 - atomic_inc(&vcc->stats->tx_err);
26196 + atomic_inc_unchecked(&vcc->stats->tx_err);
26197 dev_kfree_skb_any(skb);
26198 return -EINVAL;
26199 }
26200 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26201 }
26202
26203 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
26204 - atomic_inc(&vcc->stats->tx_err);
26205 + atomic_inc_unchecked(&vcc->stats->tx_err);
26206 dev_kfree_skb_any(skb);
26207 return -EIO;
26208 }
26209 - atomic_inc(&vcc->stats->tx);
26210 + atomic_inc_unchecked(&vcc->stats->tx);
26211
26212 return 0;
26213 }
26214 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26215 printk
26216 ("nicstar%d: Can't allocate buffers for aal0.\n",
26217 card->index);
26218 - atomic_add(i, &vcc->stats->rx_drop);
26219 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
26220 break;
26221 }
26222 if (!atm_charge(vcc, sb->truesize)) {
26223 RXPRINTK
26224 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
26225 card->index);
26226 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26227 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26228 dev_kfree_skb_any(sb);
26229 break;
26230 }
26231 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26232 ATM_SKB(sb)->vcc = vcc;
26233 __net_timestamp(sb);
26234 vcc->push(vcc, sb);
26235 - atomic_inc(&vcc->stats->rx);
26236 + atomic_inc_unchecked(&vcc->stats->rx);
26237 cell += ATM_CELL_PAYLOAD;
26238 }
26239
26240 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26241 if (iovb == NULL) {
26242 printk("nicstar%d: Out of iovec buffers.\n",
26243 card->index);
26244 - atomic_inc(&vcc->stats->rx_drop);
26245 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26246 recycle_rx_buf(card, skb);
26247 return;
26248 }
26249 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26250 small or large buffer itself. */
26251 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
26252 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26253 - atomic_inc(&vcc->stats->rx_err);
26254 + atomic_inc_unchecked(&vcc->stats->rx_err);
26255 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26256 NS_MAX_IOVECS);
26257 NS_PRV_IOVCNT(iovb) = 0;
26258 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26259 ("nicstar%d: Expected a small buffer, and this is not one.\n",
26260 card->index);
26261 which_list(card, skb);
26262 - atomic_inc(&vcc->stats->rx_err);
26263 + atomic_inc_unchecked(&vcc->stats->rx_err);
26264 recycle_rx_buf(card, skb);
26265 vc->rx_iov = NULL;
26266 recycle_iov_buf(card, iovb);
26267 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26268 ("nicstar%d: Expected a large buffer, and this is not one.\n",
26269 card->index);
26270 which_list(card, skb);
26271 - atomic_inc(&vcc->stats->rx_err);
26272 + atomic_inc_unchecked(&vcc->stats->rx_err);
26273 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26274 NS_PRV_IOVCNT(iovb));
26275 vc->rx_iov = NULL;
26276 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26277 printk(" - PDU size mismatch.\n");
26278 else
26279 printk(".\n");
26280 - atomic_inc(&vcc->stats->rx_err);
26281 + atomic_inc_unchecked(&vcc->stats->rx_err);
26282 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26283 NS_PRV_IOVCNT(iovb));
26284 vc->rx_iov = NULL;
26285 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26286 /* skb points to a small buffer */
26287 if (!atm_charge(vcc, skb->truesize)) {
26288 push_rxbufs(card, skb);
26289 - atomic_inc(&vcc->stats->rx_drop);
26290 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26291 } else {
26292 skb_put(skb, len);
26293 dequeue_sm_buf(card, skb);
26294 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26295 ATM_SKB(skb)->vcc = vcc;
26296 __net_timestamp(skb);
26297 vcc->push(vcc, skb);
26298 - atomic_inc(&vcc->stats->rx);
26299 + atomic_inc_unchecked(&vcc->stats->rx);
26300 }
26301 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
26302 struct sk_buff *sb;
26303 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26304 if (len <= NS_SMBUFSIZE) {
26305 if (!atm_charge(vcc, sb->truesize)) {
26306 push_rxbufs(card, sb);
26307 - atomic_inc(&vcc->stats->rx_drop);
26308 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26309 } else {
26310 skb_put(sb, len);
26311 dequeue_sm_buf(card, sb);
26312 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26313 ATM_SKB(sb)->vcc = vcc;
26314 __net_timestamp(sb);
26315 vcc->push(vcc, sb);
26316 - atomic_inc(&vcc->stats->rx);
26317 + atomic_inc_unchecked(&vcc->stats->rx);
26318 }
26319
26320 push_rxbufs(card, skb);
26321 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26322
26323 if (!atm_charge(vcc, skb->truesize)) {
26324 push_rxbufs(card, skb);
26325 - atomic_inc(&vcc->stats->rx_drop);
26326 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26327 } else {
26328 dequeue_lg_buf(card, skb);
26329 #ifdef NS_USE_DESTRUCTORS
26330 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26331 ATM_SKB(skb)->vcc = vcc;
26332 __net_timestamp(skb);
26333 vcc->push(vcc, skb);
26334 - atomic_inc(&vcc->stats->rx);
26335 + atomic_inc_unchecked(&vcc->stats->rx);
26336 }
26337
26338 push_rxbufs(card, sb);
26339 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26340 printk
26341 ("nicstar%d: Out of huge buffers.\n",
26342 card->index);
26343 - atomic_inc(&vcc->stats->rx_drop);
26344 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26345 recycle_iovec_rx_bufs(card,
26346 (struct iovec *)
26347 iovb->data,
26348 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26349 card->hbpool.count++;
26350 } else
26351 dev_kfree_skb_any(hb);
26352 - atomic_inc(&vcc->stats->rx_drop);
26353 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26354 } else {
26355 /* Copy the small buffer to the huge buffer */
26356 sb = (struct sk_buff *)iov->iov_base;
26357 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26358 #endif /* NS_USE_DESTRUCTORS */
26359 __net_timestamp(hb);
26360 vcc->push(vcc, hb);
26361 - atomic_inc(&vcc->stats->rx);
26362 + atomic_inc_unchecked(&vcc->stats->rx);
26363 }
26364 }
26365
26366 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26367 index 5d1d076..12fbca4 100644
26368 --- a/drivers/atm/solos-pci.c
26369 +++ b/drivers/atm/solos-pci.c
26370 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26371 }
26372 atm_charge(vcc, skb->truesize);
26373 vcc->push(vcc, skb);
26374 - atomic_inc(&vcc->stats->rx);
26375 + atomic_inc_unchecked(&vcc->stats->rx);
26376 break;
26377
26378 case PKT_STATUS:
26379 @@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26380 vcc = SKB_CB(oldskb)->vcc;
26381
26382 if (vcc) {
26383 - atomic_inc(&vcc->stats->tx);
26384 + atomic_inc_unchecked(&vcc->stats->tx);
26385 solos_pop(vcc, oldskb);
26386 } else
26387 dev_kfree_skb_irq(oldskb);
26388 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26389 index 90f1ccc..04c4a1e 100644
26390 --- a/drivers/atm/suni.c
26391 +++ b/drivers/atm/suni.c
26392 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26393
26394
26395 #define ADD_LIMITED(s,v) \
26396 - atomic_add((v),&stats->s); \
26397 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26398 + atomic_add_unchecked((v),&stats->s); \
26399 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26400
26401
26402 static void suni_hz(unsigned long from_timer)
26403 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26404 index 5120a96..e2572bd 100644
26405 --- a/drivers/atm/uPD98402.c
26406 +++ b/drivers/atm/uPD98402.c
26407 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26408 struct sonet_stats tmp;
26409 int error = 0;
26410
26411 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26412 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26413 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26414 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26415 if (zero && !error) {
26416 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26417
26418
26419 #define ADD_LIMITED(s,v) \
26420 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26421 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26422 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26423 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26424 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26425 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26426
26427
26428 static void stat_event(struct atm_dev *dev)
26429 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26430 if (reason & uPD98402_INT_PFM) stat_event(dev);
26431 if (reason & uPD98402_INT_PCO) {
26432 (void) GET(PCOCR); /* clear interrupt cause */
26433 - atomic_add(GET(HECCT),
26434 + atomic_add_unchecked(GET(HECCT),
26435 &PRIV(dev)->sonet_stats.uncorr_hcs);
26436 }
26437 if ((reason & uPD98402_INT_RFO) &&
26438 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26439 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26440 uPD98402_INT_LOS),PIMR); /* enable them */
26441 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26442 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26443 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26444 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26445 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26446 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26447 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26448 return 0;
26449 }
26450
26451 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26452 index d889f56..17eb71e 100644
26453 --- a/drivers/atm/zatm.c
26454 +++ b/drivers/atm/zatm.c
26455 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26456 }
26457 if (!size) {
26458 dev_kfree_skb_irq(skb);
26459 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26460 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26461 continue;
26462 }
26463 if (!atm_charge(vcc,skb->truesize)) {
26464 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26465 skb->len = size;
26466 ATM_SKB(skb)->vcc = vcc;
26467 vcc->push(vcc,skb);
26468 - atomic_inc(&vcc->stats->rx);
26469 + atomic_inc_unchecked(&vcc->stats->rx);
26470 }
26471 zout(pos & 0xffff,MTA(mbx));
26472 #if 0 /* probably a stupid idea */
26473 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26474 skb_queue_head(&zatm_vcc->backlog,skb);
26475 break;
26476 }
26477 - atomic_inc(&vcc->stats->tx);
26478 + atomic_inc_unchecked(&vcc->stats->tx);
26479 wake_up(&zatm_vcc->tx_wait);
26480 }
26481
26482 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26483 index a4760e0..51283cf 100644
26484 --- a/drivers/base/devtmpfs.c
26485 +++ b/drivers/base/devtmpfs.c
26486 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26487 if (!thread)
26488 return 0;
26489
26490 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26491 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26492 if (err)
26493 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26494 else
26495 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26496 index caf995f..6f76697 100644
26497 --- a/drivers/base/power/wakeup.c
26498 +++ b/drivers/base/power/wakeup.c
26499 @@ -30,14 +30,14 @@ bool events_check_enabled;
26500 * They need to be modified together atomically, so it's better to use one
26501 * atomic variable to hold them both.
26502 */
26503 -static atomic_t combined_event_count = ATOMIC_INIT(0);
26504 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26505
26506 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26507 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26508
26509 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26510 {
26511 - unsigned int comb = atomic_read(&combined_event_count);
26512 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
26513
26514 *cnt = (comb >> IN_PROGRESS_BITS);
26515 *inpr = comb & MAX_IN_PROGRESS;
26516 @@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26517 ws->last_time = ktime_get();
26518
26519 /* Increment the counter of events in progress. */
26520 - atomic_inc(&combined_event_count);
26521 + atomic_inc_unchecked(&combined_event_count);
26522 }
26523
26524 /**
26525 @@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26526 * Increment the counter of registered wakeup events and decrement the
26527 * couter of wakeup events in progress simultaneously.
26528 */
26529 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26530 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26531 }
26532
26533 /**
26534 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26535 index b0f553b..77b928b 100644
26536 --- a/drivers/block/cciss.c
26537 +++ b/drivers/block/cciss.c
26538 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26539 int err;
26540 u32 cp;
26541
26542 + memset(&arg64, 0, sizeof(arg64));
26543 +
26544 err = 0;
26545 err |=
26546 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26547 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
26548 while (!list_empty(&h->reqQ)) {
26549 c = list_entry(h->reqQ.next, CommandList_struct, list);
26550 /* can't do anything if fifo is full */
26551 - if ((h->access.fifo_full(h))) {
26552 + if ((h->access->fifo_full(h))) {
26553 dev_warn(&h->pdev->dev, "fifo full\n");
26554 break;
26555 }
26556 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
26557 h->Qdepth--;
26558
26559 /* Tell the controller execute command */
26560 - h->access.submit_command(h, c);
26561 + h->access->submit_command(h, c);
26562
26563 /* Put job onto the completed Q */
26564 addQ(&h->cmpQ, c);
26565 @@ -3443,17 +3445,17 @@ startio:
26566
26567 static inline unsigned long get_next_completion(ctlr_info_t *h)
26568 {
26569 - return h->access.command_completed(h);
26570 + return h->access->command_completed(h);
26571 }
26572
26573 static inline int interrupt_pending(ctlr_info_t *h)
26574 {
26575 - return h->access.intr_pending(h);
26576 + return h->access->intr_pending(h);
26577 }
26578
26579 static inline long interrupt_not_for_us(ctlr_info_t *h)
26580 {
26581 - return ((h->access.intr_pending(h) == 0) ||
26582 + return ((h->access->intr_pending(h) == 0) ||
26583 (h->interrupts_enabled == 0));
26584 }
26585
26586 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
26587 u32 a;
26588
26589 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26590 - return h->access.command_completed(h);
26591 + return h->access->command_completed(h);
26592
26593 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26594 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26595 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26596 trans_support & CFGTBL_Trans_use_short_tags);
26597
26598 /* Change the access methods to the performant access methods */
26599 - h->access = SA5_performant_access;
26600 + h->access = &SA5_performant_access;
26601 h->transMethod = CFGTBL_Trans_Performant;
26602
26603 return;
26604 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26605 if (prod_index < 0)
26606 return -ENODEV;
26607 h->product_name = products[prod_index].product_name;
26608 - h->access = *(products[prod_index].access);
26609 + h->access = products[prod_index].access;
26610
26611 if (cciss_board_disabled(h)) {
26612 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26613 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
26614 }
26615
26616 /* make sure the board interrupts are off */
26617 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26618 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26619 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26620 if (rc)
26621 goto clean2;
26622 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
26623 * fake ones to scoop up any residual completions.
26624 */
26625 spin_lock_irqsave(&h->lock, flags);
26626 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26627 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26628 spin_unlock_irqrestore(&h->lock, flags);
26629 free_irq(h->intr[h->intr_mode], h);
26630 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26631 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
26632 dev_info(&h->pdev->dev, "Board READY.\n");
26633 dev_info(&h->pdev->dev,
26634 "Waiting for stale completions to drain.\n");
26635 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26636 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26637 msleep(10000);
26638 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26639 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26640
26641 rc = controller_reset_failed(h->cfgtable);
26642 if (rc)
26643 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
26644 cciss_scsi_setup(h);
26645
26646 /* Turn the interrupts on so we can service requests */
26647 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26648 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26649
26650 /* Get the firmware version */
26651 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26652 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26653 kfree(flush_buf);
26654 if (return_code != IO_OK)
26655 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26656 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26657 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26658 free_irq(h->intr[h->intr_mode], h);
26659 }
26660
26661 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26662 index 7fda30e..eb5dfe0 100644
26663 --- a/drivers/block/cciss.h
26664 +++ b/drivers/block/cciss.h
26665 @@ -101,7 +101,7 @@ struct ctlr_info
26666 /* information about each logical volume */
26667 drive_info_struct *drv[CISS_MAX_LUN];
26668
26669 - struct access_method access;
26670 + struct access_method *access;
26671
26672 /* queue and queue Info */
26673 struct list_head reqQ;
26674 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26675 index 9125bbe..eede5c8 100644
26676 --- a/drivers/block/cpqarray.c
26677 +++ b/drivers/block/cpqarray.c
26678 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26679 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26680 goto Enomem4;
26681 }
26682 - hba[i]->access.set_intr_mask(hba[i], 0);
26683 + hba[i]->access->set_intr_mask(hba[i], 0);
26684 if (request_irq(hba[i]->intr, do_ida_intr,
26685 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26686 {
26687 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26688 add_timer(&hba[i]->timer);
26689
26690 /* Enable IRQ now that spinlock and rate limit timer are set up */
26691 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26692 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26693
26694 for(j=0; j<NWD; j++) {
26695 struct gendisk *disk = ida_gendisk[i][j];
26696 @@ -694,7 +694,7 @@ DBGINFO(
26697 for(i=0; i<NR_PRODUCTS; i++) {
26698 if (board_id == products[i].board_id) {
26699 c->product_name = products[i].product_name;
26700 - c->access = *(products[i].access);
26701 + c->access = products[i].access;
26702 break;
26703 }
26704 }
26705 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26706 hba[ctlr]->intr = intr;
26707 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26708 hba[ctlr]->product_name = products[j].product_name;
26709 - hba[ctlr]->access = *(products[j].access);
26710 + hba[ctlr]->access = products[j].access;
26711 hba[ctlr]->ctlr = ctlr;
26712 hba[ctlr]->board_id = board_id;
26713 hba[ctlr]->pci_dev = NULL; /* not PCI */
26714 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
26715
26716 while((c = h->reqQ) != NULL) {
26717 /* Can't do anything if we're busy */
26718 - if (h->access.fifo_full(h) == 0)
26719 + if (h->access->fifo_full(h) == 0)
26720 return;
26721
26722 /* Get the first entry from the request Q */
26723 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
26724 h->Qdepth--;
26725
26726 /* Tell the controller to do our bidding */
26727 - h->access.submit_command(h, c);
26728 + h->access->submit_command(h, c);
26729
26730 /* Get onto the completion Q */
26731 addQ(&h->cmpQ, c);
26732 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26733 unsigned long flags;
26734 __u32 a,a1;
26735
26736 - istat = h->access.intr_pending(h);
26737 + istat = h->access->intr_pending(h);
26738 /* Is this interrupt for us? */
26739 if (istat == 0)
26740 return IRQ_NONE;
26741 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26742 */
26743 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26744 if (istat & FIFO_NOT_EMPTY) {
26745 - while((a = h->access.command_completed(h))) {
26746 + while((a = h->access->command_completed(h))) {
26747 a1 = a; a &= ~3;
26748 if ((c = h->cmpQ) == NULL)
26749 {
26750 @@ -1449,11 +1449,11 @@ static int sendcmd(
26751 /*
26752 * Disable interrupt
26753 */
26754 - info_p->access.set_intr_mask(info_p, 0);
26755 + info_p->access->set_intr_mask(info_p, 0);
26756 /* Make sure there is room in the command FIFO */
26757 /* Actually it should be completely empty at this time. */
26758 for (i = 200000; i > 0; i--) {
26759 - temp = info_p->access.fifo_full(info_p);
26760 + temp = info_p->access->fifo_full(info_p);
26761 if (temp != 0) {
26762 break;
26763 }
26764 @@ -1466,7 +1466,7 @@ DBG(
26765 /*
26766 * Send the cmd
26767 */
26768 - info_p->access.submit_command(info_p, c);
26769 + info_p->access->submit_command(info_p, c);
26770 complete = pollcomplete(ctlr);
26771
26772 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26773 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26774 * we check the new geometry. Then turn interrupts back on when
26775 * we're done.
26776 */
26777 - host->access.set_intr_mask(host, 0);
26778 + host->access->set_intr_mask(host, 0);
26779 getgeometry(ctlr);
26780 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26781 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26782
26783 for(i=0; i<NWD; i++) {
26784 struct gendisk *disk = ida_gendisk[ctlr][i];
26785 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
26786 /* Wait (up to 2 seconds) for a command to complete */
26787
26788 for (i = 200000; i > 0; i--) {
26789 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
26790 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
26791 if (done == 0) {
26792 udelay(10); /* a short fixed delay */
26793 } else
26794 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26795 index be73e9d..7fbf140 100644
26796 --- a/drivers/block/cpqarray.h
26797 +++ b/drivers/block/cpqarray.h
26798 @@ -99,7 +99,7 @@ struct ctlr_info {
26799 drv_info_t drv[NWD];
26800 struct proc_dir_entry *proc;
26801
26802 - struct access_method access;
26803 + struct access_method *access;
26804
26805 cmdlist_t *reqQ;
26806 cmdlist_t *cmpQ;
26807 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26808 index 9cf2035..bffca95 100644
26809 --- a/drivers/block/drbd/drbd_int.h
26810 +++ b/drivers/block/drbd/drbd_int.h
26811 @@ -736,7 +736,7 @@ struct drbd_request;
26812 struct drbd_epoch {
26813 struct list_head list;
26814 unsigned int barrier_nr;
26815 - atomic_t epoch_size; /* increased on every request added. */
26816 + atomic_unchecked_t epoch_size; /* increased on every request added. */
26817 atomic_t active; /* increased on every req. added, and dec on every finished. */
26818 unsigned long flags;
26819 };
26820 @@ -1108,7 +1108,7 @@ struct drbd_conf {
26821 void *int_dig_in;
26822 void *int_dig_vv;
26823 wait_queue_head_t seq_wait;
26824 - atomic_t packet_seq;
26825 + atomic_unchecked_t packet_seq;
26826 unsigned int peer_seq;
26827 spinlock_t peer_seq_lock;
26828 unsigned int minor;
26829 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26830
26831 static inline void drbd_tcp_cork(struct socket *sock)
26832 {
26833 - int __user val = 1;
26834 + int val = 1;
26835 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26836 - (char __user *)&val, sizeof(val));
26837 + (char __force_user *)&val, sizeof(val));
26838 }
26839
26840 static inline void drbd_tcp_uncork(struct socket *sock)
26841 {
26842 - int __user val = 0;
26843 + int val = 0;
26844 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26845 - (char __user *)&val, sizeof(val));
26846 + (char __force_user *)&val, sizeof(val));
26847 }
26848
26849 static inline void drbd_tcp_nodelay(struct socket *sock)
26850 {
26851 - int __user val = 1;
26852 + int val = 1;
26853 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26854 - (char __user *)&val, sizeof(val));
26855 + (char __force_user *)&val, sizeof(val));
26856 }
26857
26858 static inline void drbd_tcp_quickack(struct socket *sock)
26859 {
26860 - int __user val = 2;
26861 + int val = 2;
26862 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26863 - (char __user *)&val, sizeof(val));
26864 + (char __force_user *)&val, sizeof(val));
26865 }
26866
26867 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26868 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26869 index 0358e55..bc33689 100644
26870 --- a/drivers/block/drbd/drbd_main.c
26871 +++ b/drivers/block/drbd/drbd_main.c
26872 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26873 p.sector = sector;
26874 p.block_id = block_id;
26875 p.blksize = blksize;
26876 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26877 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26878
26879 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26880 return false;
26881 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26882 p.sector = cpu_to_be64(req->sector);
26883 p.block_id = (unsigned long)req;
26884 p.seq_num = cpu_to_be32(req->seq_num =
26885 - atomic_add_return(1, &mdev->packet_seq));
26886 + atomic_add_return_unchecked(1, &mdev->packet_seq));
26887
26888 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26889
26890 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26891 atomic_set(&mdev->unacked_cnt, 0);
26892 atomic_set(&mdev->local_cnt, 0);
26893 atomic_set(&mdev->net_cnt, 0);
26894 - atomic_set(&mdev->packet_seq, 0);
26895 + atomic_set_unchecked(&mdev->packet_seq, 0);
26896 atomic_set(&mdev->pp_in_use, 0);
26897 atomic_set(&mdev->pp_in_use_by_net, 0);
26898 atomic_set(&mdev->rs_sect_in, 0);
26899 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26900 mdev->receiver.t_state);
26901
26902 /* no need to lock it, I'm the only thread alive */
26903 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26904 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26905 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26906 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26907 mdev->al_writ_cnt =
26908 mdev->bm_writ_cnt =
26909 mdev->read_cnt =
26910 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26911 index af2a250..219c74b 100644
26912 --- a/drivers/block/drbd/drbd_nl.c
26913 +++ b/drivers/block/drbd/drbd_nl.c
26914 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26915 module_put(THIS_MODULE);
26916 }
26917
26918 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26919 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26920
26921 static unsigned short *
26922 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26923 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26924 cn_reply->id.idx = CN_IDX_DRBD;
26925 cn_reply->id.val = CN_VAL_DRBD;
26926
26927 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26928 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26929 cn_reply->ack = 0; /* not used here. */
26930 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26931 (int)((char *)tl - (char *)reply->tag_list);
26932 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26933 cn_reply->id.idx = CN_IDX_DRBD;
26934 cn_reply->id.val = CN_VAL_DRBD;
26935
26936 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26937 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26938 cn_reply->ack = 0; /* not used here. */
26939 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26940 (int)((char *)tl - (char *)reply->tag_list);
26941 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26942 cn_reply->id.idx = CN_IDX_DRBD;
26943 cn_reply->id.val = CN_VAL_DRBD;
26944
26945 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26946 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26947 cn_reply->ack = 0; // not used here.
26948 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26949 (int)((char*)tl - (char*)reply->tag_list);
26950 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26951 cn_reply->id.idx = CN_IDX_DRBD;
26952 cn_reply->id.val = CN_VAL_DRBD;
26953
26954 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26955 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26956 cn_reply->ack = 0; /* not used here. */
26957 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26958 (int)((char *)tl - (char *)reply->tag_list);
26959 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26960 index 43beaca..4a5b1dd 100644
26961 --- a/drivers/block/drbd/drbd_receiver.c
26962 +++ b/drivers/block/drbd/drbd_receiver.c
26963 @@ -894,7 +894,7 @@ retry:
26964 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26965 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26966
26967 - atomic_set(&mdev->packet_seq, 0);
26968 + atomic_set_unchecked(&mdev->packet_seq, 0);
26969 mdev->peer_seq = 0;
26970
26971 drbd_thread_start(&mdev->asender);
26972 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26973 do {
26974 next_epoch = NULL;
26975
26976 - epoch_size = atomic_read(&epoch->epoch_size);
26977 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26978
26979 switch (ev & ~EV_CLEANUP) {
26980 case EV_PUT:
26981 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26982 rv = FE_DESTROYED;
26983 } else {
26984 epoch->flags = 0;
26985 - atomic_set(&epoch->epoch_size, 0);
26986 + atomic_set_unchecked(&epoch->epoch_size, 0);
26987 /* atomic_set(&epoch->active, 0); is already zero */
26988 if (rv == FE_STILL_LIVE)
26989 rv = FE_RECYCLED;
26990 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26991 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26992 drbd_flush(mdev);
26993
26994 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26995 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26996 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26997 if (epoch)
26998 break;
26999 }
27000
27001 epoch = mdev->current_epoch;
27002 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
27003 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
27004
27005 D_ASSERT(atomic_read(&epoch->active) == 0);
27006 D_ASSERT(epoch->flags == 0);
27007 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
27008 }
27009
27010 epoch->flags = 0;
27011 - atomic_set(&epoch->epoch_size, 0);
27012 + atomic_set_unchecked(&epoch->epoch_size, 0);
27013 atomic_set(&epoch->active, 0);
27014
27015 spin_lock(&mdev->epoch_lock);
27016 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
27017 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
27018 list_add(&epoch->list, &mdev->current_epoch->list);
27019 mdev->current_epoch = epoch;
27020 mdev->epochs++;
27021 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
27022 spin_unlock(&mdev->peer_seq_lock);
27023
27024 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
27025 - atomic_inc(&mdev->current_epoch->epoch_size);
27026 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
27027 return drbd_drain_block(mdev, data_size);
27028 }
27029
27030 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
27031
27032 spin_lock(&mdev->epoch_lock);
27033 e->epoch = mdev->current_epoch;
27034 - atomic_inc(&e->epoch->epoch_size);
27035 + atomic_inc_unchecked(&e->epoch->epoch_size);
27036 atomic_inc(&e->epoch->active);
27037 spin_unlock(&mdev->epoch_lock);
27038
27039 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
27040 D_ASSERT(list_empty(&mdev->done_ee));
27041
27042 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
27043 - atomic_set(&mdev->current_epoch->epoch_size, 0);
27044 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
27045 D_ASSERT(list_empty(&mdev->current_epoch->list));
27046 }
27047
27048 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
27049 index 1e888c9..05cf1b0 100644
27050 --- a/drivers/block/loop.c
27051 +++ b/drivers/block/loop.c
27052 @@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
27053 mm_segment_t old_fs = get_fs();
27054
27055 set_fs(get_ds());
27056 - bw = file->f_op->write(file, buf, len, &pos);
27057 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
27058 set_fs(old_fs);
27059 if (likely(bw == len))
27060 return 0;
27061 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
27062 index 4364303..9adf4ee 100644
27063 --- a/drivers/char/Kconfig
27064 +++ b/drivers/char/Kconfig
27065 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
27066
27067 config DEVKMEM
27068 bool "/dev/kmem virtual device support"
27069 - default y
27070 + default n
27071 + depends on !GRKERNSEC_KMEM
27072 help
27073 Say Y here if you want to support the /dev/kmem device. The
27074 /dev/kmem device is rarely used, but can be used for certain
27075 @@ -596,6 +597,7 @@ config DEVPORT
27076 bool
27077 depends on !M68K
27078 depends on ISA || PCI
27079 + depends on !GRKERNSEC_KMEM
27080 default y
27081
27082 source "drivers/s390/char/Kconfig"
27083 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
27084 index 2e04433..22afc64 100644
27085 --- a/drivers/char/agp/frontend.c
27086 +++ b/drivers/char/agp/frontend.c
27087 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
27088 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27089 return -EFAULT;
27090
27091 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27092 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27093 return -EFAULT;
27094
27095 client = agp_find_client_by_pid(reserve.pid);
27096 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
27097 index 095ab90..afad0a4 100644
27098 --- a/drivers/char/briq_panel.c
27099 +++ b/drivers/char/briq_panel.c
27100 @@ -9,6 +9,7 @@
27101 #include <linux/types.h>
27102 #include <linux/errno.h>
27103 #include <linux/tty.h>
27104 +#include <linux/mutex.h>
27105 #include <linux/timer.h>
27106 #include <linux/kernel.h>
27107 #include <linux/wait.h>
27108 @@ -34,6 +35,7 @@ static int vfd_is_open;
27109 static unsigned char vfd[40];
27110 static int vfd_cursor;
27111 static unsigned char ledpb, led;
27112 +static DEFINE_MUTEX(vfd_mutex);
27113
27114 static void update_vfd(void)
27115 {
27116 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
27117 if (!vfd_is_open)
27118 return -EBUSY;
27119
27120 + mutex_lock(&vfd_mutex);
27121 for (;;) {
27122 char c;
27123 if (!indx)
27124 break;
27125 - if (get_user(c, buf))
27126 + if (get_user(c, buf)) {
27127 + mutex_unlock(&vfd_mutex);
27128 return -EFAULT;
27129 + }
27130 if (esc) {
27131 set_led(c);
27132 esc = 0;
27133 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
27134 buf++;
27135 }
27136 update_vfd();
27137 + mutex_unlock(&vfd_mutex);
27138
27139 return len;
27140 }
27141 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
27142 index f773a9d..65cd683 100644
27143 --- a/drivers/char/genrtc.c
27144 +++ b/drivers/char/genrtc.c
27145 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
27146 switch (cmd) {
27147
27148 case RTC_PLL_GET:
27149 + memset(&pll, 0, sizeof(pll));
27150 if (get_rtc_pll(&pll))
27151 return -EINVAL;
27152 else
27153 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
27154 index 0833896..cccce52 100644
27155 --- a/drivers/char/hpet.c
27156 +++ b/drivers/char/hpet.c
27157 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
27158 }
27159
27160 static int
27161 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
27162 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
27163 struct hpet_info *info)
27164 {
27165 struct hpet_timer __iomem *timer;
27166 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
27167 index 58c0e63..46c16bf 100644
27168 --- a/drivers/char/ipmi/ipmi_msghandler.c
27169 +++ b/drivers/char/ipmi/ipmi_msghandler.c
27170 @@ -415,7 +415,7 @@ struct ipmi_smi {
27171 struct proc_dir_entry *proc_dir;
27172 char proc_dir_name[10];
27173
27174 - atomic_t stats[IPMI_NUM_STATS];
27175 + atomic_unchecked_t stats[IPMI_NUM_STATS];
27176
27177 /*
27178 * run_to_completion duplicate of smb_info, smi_info
27179 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27180
27181
27182 #define ipmi_inc_stat(intf, stat) \
27183 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27184 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27185 #define ipmi_get_stat(intf, stat) \
27186 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27187 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27188
27189 static int is_lan_addr(struct ipmi_addr *addr)
27190 {
27191 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
27192 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27193 init_waitqueue_head(&intf->waitq);
27194 for (i = 0; i < IPMI_NUM_STATS; i++)
27195 - atomic_set(&intf->stats[i], 0);
27196 + atomic_set_unchecked(&intf->stats[i], 0);
27197
27198 intf->proc_dir = NULL;
27199
27200 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
27201 index 9397ab4..d01bee1 100644
27202 --- a/drivers/char/ipmi/ipmi_si_intf.c
27203 +++ b/drivers/char/ipmi/ipmi_si_intf.c
27204 @@ -277,7 +277,7 @@ struct smi_info {
27205 unsigned char slave_addr;
27206
27207 /* Counters and things for the proc filesystem. */
27208 - atomic_t stats[SI_NUM_STATS];
27209 + atomic_unchecked_t stats[SI_NUM_STATS];
27210
27211 struct task_struct *thread;
27212
27213 @@ -286,9 +286,9 @@ struct smi_info {
27214 };
27215
27216 #define smi_inc_stat(smi, stat) \
27217 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27218 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27219 #define smi_get_stat(smi, stat) \
27220 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27221 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27222
27223 #define SI_MAX_PARMS 4
27224
27225 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
27226 atomic_set(&new_smi->req_events, 0);
27227 new_smi->run_to_completion = 0;
27228 for (i = 0; i < SI_NUM_STATS; i++)
27229 - atomic_set(&new_smi->stats[i], 0);
27230 + atomic_set_unchecked(&new_smi->stats[i], 0);
27231
27232 new_smi->interrupt_disabled = 1;
27233 atomic_set(&new_smi->stop_operation, 0);
27234 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
27235 index 1aeaaba..e018570 100644
27236 --- a/drivers/char/mbcs.c
27237 +++ b/drivers/char/mbcs.c
27238 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
27239 return 0;
27240 }
27241
27242 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
27243 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
27244 {
27245 .part_num = MBCS_PART_NUM,
27246 .mfg_num = MBCS_MFG_NUM,
27247 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
27248 index 1451790..f705c30 100644
27249 --- a/drivers/char/mem.c
27250 +++ b/drivers/char/mem.c
27251 @@ -18,6 +18,7 @@
27252 #include <linux/raw.h>
27253 #include <linux/tty.h>
27254 #include <linux/capability.h>
27255 +#include <linux/security.h>
27256 #include <linux/ptrace.h>
27257 #include <linux/device.h>
27258 #include <linux/highmem.h>
27259 @@ -35,6 +36,10 @@
27260 # include <linux/efi.h>
27261 #endif
27262
27263 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27264 +extern const struct file_operations grsec_fops;
27265 +#endif
27266 +
27267 static inline unsigned long size_inside_page(unsigned long start,
27268 unsigned long size)
27269 {
27270 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27271
27272 while (cursor < to) {
27273 if (!devmem_is_allowed(pfn)) {
27274 +#ifdef CONFIG_GRKERNSEC_KMEM
27275 + gr_handle_mem_readwrite(from, to);
27276 +#else
27277 printk(KERN_INFO
27278 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27279 current->comm, from, to);
27280 +#endif
27281 return 0;
27282 }
27283 cursor += PAGE_SIZE;
27284 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27285 }
27286 return 1;
27287 }
27288 +#elif defined(CONFIG_GRKERNSEC_KMEM)
27289 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27290 +{
27291 + return 0;
27292 +}
27293 #else
27294 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27295 {
27296 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27297
27298 while (count > 0) {
27299 unsigned long remaining;
27300 + char *temp;
27301
27302 sz = size_inside_page(p, count);
27303
27304 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27305 if (!ptr)
27306 return -EFAULT;
27307
27308 - remaining = copy_to_user(buf, ptr, sz);
27309 +#ifdef CONFIG_PAX_USERCOPY
27310 + temp = kmalloc(sz, GFP_KERNEL);
27311 + if (!temp) {
27312 + unxlate_dev_mem_ptr(p, ptr);
27313 + return -ENOMEM;
27314 + }
27315 + memcpy(temp, ptr, sz);
27316 +#else
27317 + temp = ptr;
27318 +#endif
27319 +
27320 + remaining = copy_to_user(buf, temp, sz);
27321 +
27322 +#ifdef CONFIG_PAX_USERCOPY
27323 + kfree(temp);
27324 +#endif
27325 +
27326 unxlate_dev_mem_ptr(p, ptr);
27327 if (remaining)
27328 return -EFAULT;
27329 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27330 size_t count, loff_t *ppos)
27331 {
27332 unsigned long p = *ppos;
27333 - ssize_t low_count, read, sz;
27334 + ssize_t low_count, read, sz, err = 0;
27335 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27336 - int err = 0;
27337
27338 read = 0;
27339 if (p < (unsigned long) high_memory) {
27340 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27341 }
27342 #endif
27343 while (low_count > 0) {
27344 + char *temp;
27345 +
27346 sz = size_inside_page(p, low_count);
27347
27348 /*
27349 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27350 */
27351 kbuf = xlate_dev_kmem_ptr((char *)p);
27352
27353 - if (copy_to_user(buf, kbuf, sz))
27354 +#ifdef CONFIG_PAX_USERCOPY
27355 + temp = kmalloc(sz, GFP_KERNEL);
27356 + if (!temp)
27357 + return -ENOMEM;
27358 + memcpy(temp, kbuf, sz);
27359 +#else
27360 + temp = kbuf;
27361 +#endif
27362 +
27363 + err = copy_to_user(buf, temp, sz);
27364 +
27365 +#ifdef CONFIG_PAX_USERCOPY
27366 + kfree(temp);
27367 +#endif
27368 +
27369 + if (err)
27370 return -EFAULT;
27371 buf += sz;
27372 p += sz;
27373 @@ -867,6 +914,9 @@ static const struct memdev {
27374 #ifdef CONFIG_CRASH_DUMP
27375 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27376 #endif
27377 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27378 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27379 +#endif
27380 };
27381
27382 static int memory_open(struct inode *inode, struct file *filp)
27383 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27384 index da3cfee..a5a6606 100644
27385 --- a/drivers/char/nvram.c
27386 +++ b/drivers/char/nvram.c
27387 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27388
27389 spin_unlock_irq(&rtc_lock);
27390
27391 - if (copy_to_user(buf, contents, tmp - contents))
27392 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27393 return -EFAULT;
27394
27395 *ppos = i;
27396 diff --git a/drivers/char/random.c b/drivers/char/random.c
27397 index 6035ab8..bdfe4fd 100644
27398 --- a/drivers/char/random.c
27399 +++ b/drivers/char/random.c
27400 @@ -261,8 +261,13 @@
27401 /*
27402 * Configuration information
27403 */
27404 +#ifdef CONFIG_GRKERNSEC_RANDNET
27405 +#define INPUT_POOL_WORDS 512
27406 +#define OUTPUT_POOL_WORDS 128
27407 +#else
27408 #define INPUT_POOL_WORDS 128
27409 #define OUTPUT_POOL_WORDS 32
27410 +#endif
27411 #define SEC_XFER_SIZE 512
27412 #define EXTRACT_SIZE 10
27413
27414 @@ -300,10 +305,17 @@ static struct poolinfo {
27415 int poolwords;
27416 int tap1, tap2, tap3, tap4, tap5;
27417 } poolinfo_table[] = {
27418 +#ifdef CONFIG_GRKERNSEC_RANDNET
27419 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27420 + { 512, 411, 308, 208, 104, 1 },
27421 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27422 + { 128, 103, 76, 51, 25, 1 },
27423 +#else
27424 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27425 { 128, 103, 76, 51, 25, 1 },
27426 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27427 { 32, 26, 20, 14, 7, 1 },
27428 +#endif
27429 #if 0
27430 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27431 { 2048, 1638, 1231, 819, 411, 1 },
27432 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27433
27434 extract_buf(r, tmp);
27435 i = min_t(int, nbytes, EXTRACT_SIZE);
27436 - if (copy_to_user(buf, tmp, i)) {
27437 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27438 ret = -EFAULT;
27439 break;
27440 }
27441 @@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27442 #include <linux/sysctl.h>
27443
27444 static int min_read_thresh = 8, min_write_thresh;
27445 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
27446 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27447 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27448 static char sysctl_bootid[16];
27449
27450 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27451 index 1ee8ce7..b778bef 100644
27452 --- a/drivers/char/sonypi.c
27453 +++ b/drivers/char/sonypi.c
27454 @@ -55,6 +55,7 @@
27455 #include <asm/uaccess.h>
27456 #include <asm/io.h>
27457 #include <asm/system.h>
27458 +#include <asm/local.h>
27459
27460 #include <linux/sonypi.h>
27461
27462 @@ -491,7 +492,7 @@ static struct sonypi_device {
27463 spinlock_t fifo_lock;
27464 wait_queue_head_t fifo_proc_list;
27465 struct fasync_struct *fifo_async;
27466 - int open_count;
27467 + local_t open_count;
27468 int model;
27469 struct input_dev *input_jog_dev;
27470 struct input_dev *input_key_dev;
27471 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27472 static int sonypi_misc_release(struct inode *inode, struct file *file)
27473 {
27474 mutex_lock(&sonypi_device.lock);
27475 - sonypi_device.open_count--;
27476 + local_dec(&sonypi_device.open_count);
27477 mutex_unlock(&sonypi_device.lock);
27478 return 0;
27479 }
27480 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27481 {
27482 mutex_lock(&sonypi_device.lock);
27483 /* Flush input queue on first open */
27484 - if (!sonypi_device.open_count)
27485 + if (!local_read(&sonypi_device.open_count))
27486 kfifo_reset(&sonypi_device.fifo);
27487 - sonypi_device.open_count++;
27488 + local_inc(&sonypi_device.open_count);
27489 mutex_unlock(&sonypi_device.lock);
27490
27491 return 0;
27492 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27493 index 361a1df..2471eee 100644
27494 --- a/drivers/char/tpm/tpm.c
27495 +++ b/drivers/char/tpm/tpm.c
27496 @@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27497 chip->vendor.req_complete_val)
27498 goto out_recv;
27499
27500 - if ((status == chip->vendor.req_canceled)) {
27501 + if (status == chip->vendor.req_canceled) {
27502 dev_err(chip->dev, "Operation Canceled\n");
27503 rc = -ECANCELED;
27504 goto out;
27505 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27506 index 0636520..169c1d0 100644
27507 --- a/drivers/char/tpm/tpm_bios.c
27508 +++ b/drivers/char/tpm/tpm_bios.c
27509 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27510 event = addr;
27511
27512 if ((event->event_type == 0 && event->event_size == 0) ||
27513 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27514 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27515 return NULL;
27516
27517 return addr;
27518 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27519 return NULL;
27520
27521 if ((event->event_type == 0 && event->event_size == 0) ||
27522 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27523 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27524 return NULL;
27525
27526 (*pos)++;
27527 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27528 int i;
27529
27530 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27531 - seq_putc(m, data[i]);
27532 + if (!seq_putc(m, data[i]))
27533 + return -EFAULT;
27534
27535 return 0;
27536 }
27537 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27538 log->bios_event_log_end = log->bios_event_log + len;
27539
27540 virt = acpi_os_map_memory(start, len);
27541 + if (!virt) {
27542 + kfree(log->bios_event_log);
27543 + log->bios_event_log = NULL;
27544 + return -EFAULT;
27545 + }
27546
27547 - memcpy(log->bios_event_log, virt, len);
27548 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27549
27550 acpi_os_unmap_memory(virt, len);
27551 return 0;
27552 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27553 index 8e3c46d..c139b99 100644
27554 --- a/drivers/char/virtio_console.c
27555 +++ b/drivers/char/virtio_console.c
27556 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27557 if (to_user) {
27558 ssize_t ret;
27559
27560 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27561 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27562 if (ret)
27563 return -EFAULT;
27564 } else {
27565 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27566 if (!port_has_data(port) && !port->host_connected)
27567 return 0;
27568
27569 - return fill_readbuf(port, ubuf, count, true);
27570 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27571 }
27572
27573 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27574 diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
27575 index eb1d864..39ee5a7 100644
27576 --- a/drivers/dma/dmatest.c
27577 +++ b/drivers/dma/dmatest.c
27578 @@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
27579 }
27580 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
27581 cnt = dmatest_add_threads(dtc, DMA_PQ);
27582 - thread_count += cnt > 0 ?: 0;
27583 + thread_count += cnt > 0 ? cnt : 0;
27584 }
27585
27586 pr_info("dmatest: Started %u threads using %s\n",
27587 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27588 index c9eee6d..f9d5280 100644
27589 --- a/drivers/edac/amd64_edac.c
27590 +++ b/drivers/edac/amd64_edac.c
27591 @@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27592 * PCI core identifies what devices are on a system during boot, and then
27593 * inquiry this table to see if this driver is for a given device found.
27594 */
27595 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27596 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27597 {
27598 .vendor = PCI_VENDOR_ID_AMD,
27599 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27600 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27601 index e47e73b..348e0bd 100644
27602 --- a/drivers/edac/amd76x_edac.c
27603 +++ b/drivers/edac/amd76x_edac.c
27604 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27605 edac_mc_free(mci);
27606 }
27607
27608 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27609 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27610 {
27611 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27612 AMD762},
27613 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27614 index 1af531a..3a8ff27 100644
27615 --- a/drivers/edac/e752x_edac.c
27616 +++ b/drivers/edac/e752x_edac.c
27617 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27618 edac_mc_free(mci);
27619 }
27620
27621 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27622 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27623 {
27624 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27625 E7520},
27626 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27627 index 6ffb6d2..383d8d7 100644
27628 --- a/drivers/edac/e7xxx_edac.c
27629 +++ b/drivers/edac/e7xxx_edac.c
27630 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27631 edac_mc_free(mci);
27632 }
27633
27634 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27635 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27636 {
27637 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27638 E7205},
27639 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27640 index 495198a..ac08c85 100644
27641 --- a/drivers/edac/edac_pci_sysfs.c
27642 +++ b/drivers/edac/edac_pci_sysfs.c
27643 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27644 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27645 static int edac_pci_poll_msec = 1000; /* one second workq period */
27646
27647 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27648 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27649 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27650 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27651
27652 static struct kobject *edac_pci_top_main_kobj;
27653 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27654 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27655 edac_printk(KERN_CRIT, EDAC_PCI,
27656 "Signaled System Error on %s\n",
27657 pci_name(dev));
27658 - atomic_inc(&pci_nonparity_count);
27659 + atomic_inc_unchecked(&pci_nonparity_count);
27660 }
27661
27662 if (status & (PCI_STATUS_PARITY)) {
27663 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27664 "Master Data Parity Error on %s\n",
27665 pci_name(dev));
27666
27667 - atomic_inc(&pci_parity_count);
27668 + atomic_inc_unchecked(&pci_parity_count);
27669 }
27670
27671 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27672 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27673 "Detected Parity Error on %s\n",
27674 pci_name(dev));
27675
27676 - atomic_inc(&pci_parity_count);
27677 + atomic_inc_unchecked(&pci_parity_count);
27678 }
27679 }
27680
27681 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27682 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27683 "Signaled System Error on %s\n",
27684 pci_name(dev));
27685 - atomic_inc(&pci_nonparity_count);
27686 + atomic_inc_unchecked(&pci_nonparity_count);
27687 }
27688
27689 if (status & (PCI_STATUS_PARITY)) {
27690 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27691 "Master Data Parity Error on "
27692 "%s\n", pci_name(dev));
27693
27694 - atomic_inc(&pci_parity_count);
27695 + atomic_inc_unchecked(&pci_parity_count);
27696 }
27697
27698 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27699 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27700 "Detected Parity Error on %s\n",
27701 pci_name(dev));
27702
27703 - atomic_inc(&pci_parity_count);
27704 + atomic_inc_unchecked(&pci_parity_count);
27705 }
27706 }
27707 }
27708 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27709 if (!check_pci_errors)
27710 return;
27711
27712 - before_count = atomic_read(&pci_parity_count);
27713 + before_count = atomic_read_unchecked(&pci_parity_count);
27714
27715 /* scan all PCI devices looking for a Parity Error on devices and
27716 * bridges.
27717 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27718 /* Only if operator has selected panic on PCI Error */
27719 if (edac_pci_get_panic_on_pe()) {
27720 /* If the count is different 'after' from 'before' */
27721 - if (before_count != atomic_read(&pci_parity_count))
27722 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27723 panic("EDAC: PCI Parity Error");
27724 }
27725 }
27726 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27727 index c0510b3..6e2a954 100644
27728 --- a/drivers/edac/i3000_edac.c
27729 +++ b/drivers/edac/i3000_edac.c
27730 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27731 edac_mc_free(mci);
27732 }
27733
27734 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27735 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27736 {
27737 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27738 I3000},
27739 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27740 index aa08497..7e6822a 100644
27741 --- a/drivers/edac/i3200_edac.c
27742 +++ b/drivers/edac/i3200_edac.c
27743 @@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27744 edac_mc_free(mci);
27745 }
27746
27747 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27748 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27749 {
27750 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27751 I3200},
27752 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27753 index 4dc3ac2..67d05a6 100644
27754 --- a/drivers/edac/i5000_edac.c
27755 +++ b/drivers/edac/i5000_edac.c
27756 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27757 *
27758 * The "E500P" device is the first device supported.
27759 */
27760 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27761 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27762 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27763 .driver_data = I5000P},
27764
27765 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27766 index bcbdeec..9886d16 100644
27767 --- a/drivers/edac/i5100_edac.c
27768 +++ b/drivers/edac/i5100_edac.c
27769 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27770 edac_mc_free(mci);
27771 }
27772
27773 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27774 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27775 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27776 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27777 { 0, }
27778 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27779 index 74d6ec34..baff517 100644
27780 --- a/drivers/edac/i5400_edac.c
27781 +++ b/drivers/edac/i5400_edac.c
27782 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27783 *
27784 * The "E500P" device is the first device supported.
27785 */
27786 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27787 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27788 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27789 {0,} /* 0 terminated list. */
27790 };
27791 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27792 index 6104dba..e7ea8e1 100644
27793 --- a/drivers/edac/i7300_edac.c
27794 +++ b/drivers/edac/i7300_edac.c
27795 @@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27796 *
27797 * Has only 8086:360c PCI ID
27798 */
27799 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27800 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27801 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27802 {0,} /* 0 terminated list. */
27803 };
27804 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27805 index 70ad892..178943c 100644
27806 --- a/drivers/edac/i7core_edac.c
27807 +++ b/drivers/edac/i7core_edac.c
27808 @@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
27809 /*
27810 * pci_device_id table for which devices we are looking for
27811 */
27812 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27813 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27814 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27815 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27816 {0,} /* 0 terminated list. */
27817 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27818 index 4329d39..f3022ef 100644
27819 --- a/drivers/edac/i82443bxgx_edac.c
27820 +++ b/drivers/edac/i82443bxgx_edac.c
27821 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27822
27823 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27824
27825 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27826 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27827 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27828 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27829 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27830 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27831 index 931a057..fd28340 100644
27832 --- a/drivers/edac/i82860_edac.c
27833 +++ b/drivers/edac/i82860_edac.c
27834 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27835 edac_mc_free(mci);
27836 }
27837
27838 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27839 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27840 {
27841 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27842 I82860},
27843 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27844 index 33864c6..01edc61 100644
27845 --- a/drivers/edac/i82875p_edac.c
27846 +++ b/drivers/edac/i82875p_edac.c
27847 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27848 edac_mc_free(mci);
27849 }
27850
27851 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27852 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27853 {
27854 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27855 I82875P},
27856 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27857 index a5da732..983363b 100644
27858 --- a/drivers/edac/i82975x_edac.c
27859 +++ b/drivers/edac/i82975x_edac.c
27860 @@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27861 edac_mc_free(mci);
27862 }
27863
27864 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27865 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27866 {
27867 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27868 I82975X
27869 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27870 index 0106747..0b40417 100644
27871 --- a/drivers/edac/mce_amd.h
27872 +++ b/drivers/edac/mce_amd.h
27873 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
27874 bool (*dc_mce)(u16, u8);
27875 bool (*ic_mce)(u16, u8);
27876 bool (*nb_mce)(u16, u8);
27877 -};
27878 +} __no_const;
27879
27880 void amd_report_gart_errors(bool);
27881 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
27882 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27883 index b153674..ad2ba9b 100644
27884 --- a/drivers/edac/r82600_edac.c
27885 +++ b/drivers/edac/r82600_edac.c
27886 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27887 edac_mc_free(mci);
27888 }
27889
27890 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27891 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27892 {
27893 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27894 },
27895 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
27896 index 7a402bf..af0b211 100644
27897 --- a/drivers/edac/sb_edac.c
27898 +++ b/drivers/edac/sb_edac.c
27899 @@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
27900 /*
27901 * pci_device_id table for which devices we are looking for
27902 */
27903 -static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
27904 +static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
27905 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
27906 {0,} /* 0 terminated list. */
27907 };
27908 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27909 index b6f47de..c5acf3a 100644
27910 --- a/drivers/edac/x38_edac.c
27911 +++ b/drivers/edac/x38_edac.c
27912 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27913 edac_mc_free(mci);
27914 }
27915
27916 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27917 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27918 {
27919 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27920 X38},
27921 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27922 index 85661b0..c784559a 100644
27923 --- a/drivers/firewire/core-card.c
27924 +++ b/drivers/firewire/core-card.c
27925 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27926
27927 void fw_core_remove_card(struct fw_card *card)
27928 {
27929 - struct fw_card_driver dummy_driver = dummy_driver_template;
27930 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
27931
27932 card->driver->update_phy_reg(card, 4,
27933 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27934 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27935 index 4799393..37bd3ab 100644
27936 --- a/drivers/firewire/core-cdev.c
27937 +++ b/drivers/firewire/core-cdev.c
27938 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27939 int ret;
27940
27941 if ((request->channels == 0 && request->bandwidth == 0) ||
27942 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27943 - request->bandwidth < 0)
27944 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27945 return -EINVAL;
27946
27947 r = kmalloc(sizeof(*r), GFP_KERNEL);
27948 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27949 index 855ab3f..11f4bbd 100644
27950 --- a/drivers/firewire/core-transaction.c
27951 +++ b/drivers/firewire/core-transaction.c
27952 @@ -37,6 +37,7 @@
27953 #include <linux/timer.h>
27954 #include <linux/types.h>
27955 #include <linux/workqueue.h>
27956 +#include <linux/sched.h>
27957
27958 #include <asm/byteorder.h>
27959
27960 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27961 index b45be57..5fad18b 100644
27962 --- a/drivers/firewire/core.h
27963 +++ b/drivers/firewire/core.h
27964 @@ -101,6 +101,7 @@ struct fw_card_driver {
27965
27966 int (*stop_iso)(struct fw_iso_context *ctx);
27967 };
27968 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27969
27970 void fw_card_initialize(struct fw_card *card,
27971 const struct fw_card_driver *driver, struct device *device);
27972 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27973 index 153980b..4b4d046 100644
27974 --- a/drivers/firmware/dmi_scan.c
27975 +++ b/drivers/firmware/dmi_scan.c
27976 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27977 }
27978 }
27979 else {
27980 - /*
27981 - * no iounmap() for that ioremap(); it would be a no-op, but
27982 - * it's so early in setup that sucker gets confused into doing
27983 - * what it shouldn't if we actually call it.
27984 - */
27985 p = dmi_ioremap(0xF0000, 0x10000);
27986 if (p == NULL)
27987 goto error;
27988 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27989 if (buf == NULL)
27990 return -1;
27991
27992 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27993 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27994
27995 iounmap(buf);
27996 return 0;
27997 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27998 index 98723cb..10ca85b 100644
27999 --- a/drivers/gpio/gpio-vr41xx.c
28000 +++ b/drivers/gpio/gpio-vr41xx.c
28001 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
28002 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
28003 maskl, pendl, maskh, pendh);
28004
28005 - atomic_inc(&irq_err_count);
28006 + atomic_inc_unchecked(&irq_err_count);
28007
28008 return -EINVAL;
28009 }
28010 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
28011 index 8323fc3..5c1d755 100644
28012 --- a/drivers/gpu/drm/drm_crtc.c
28013 +++ b/drivers/gpu/drm/drm_crtc.c
28014 @@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
28015 */
28016 if ((out_resp->count_modes >= mode_count) && mode_count) {
28017 copied = 0;
28018 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
28019 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
28020 list_for_each_entry(mode, &connector->modes, head) {
28021 drm_crtc_convert_to_umode(&u_mode, mode);
28022 if (copy_to_user(mode_ptr + copied,
28023 @@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
28024
28025 if ((out_resp->count_props >= props_count) && props_count) {
28026 copied = 0;
28027 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
28028 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
28029 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
28030 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
28031 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
28032 if (connector->property_ids[i] != 0) {
28033 if (put_user(connector->property_ids[i],
28034 @@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
28035
28036 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
28037 copied = 0;
28038 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
28039 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
28040 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
28041 if (connector->encoder_ids[i] != 0) {
28042 if (put_user(connector->encoder_ids[i],
28043 @@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
28044 }
28045
28046 for (i = 0; i < crtc_req->count_connectors; i++) {
28047 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
28048 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
28049 if (get_user(out_id, &set_connectors_ptr[i])) {
28050 ret = -EFAULT;
28051 goto out;
28052 @@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
28053 fb = obj_to_fb(obj);
28054
28055 num_clips = r->num_clips;
28056 - clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
28057 + clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
28058
28059 if (!num_clips != !clips_ptr) {
28060 ret = -EINVAL;
28061 @@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28062 out_resp->flags = property->flags;
28063
28064 if ((out_resp->count_values >= value_count) && value_count) {
28065 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
28066 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
28067 for (i = 0; i < value_count; i++) {
28068 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
28069 ret = -EFAULT;
28070 @@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28071 if (property->flags & DRM_MODE_PROP_ENUM) {
28072 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
28073 copied = 0;
28074 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
28075 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
28076 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
28077
28078 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
28079 @@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28080 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
28081 copied = 0;
28082 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
28083 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
28084 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
28085
28086 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
28087 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
28088 @@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
28089 struct drm_mode_get_blob *out_resp = data;
28090 struct drm_property_blob *blob;
28091 int ret = 0;
28092 - void *blob_ptr;
28093 + void __user *blob_ptr;
28094
28095 if (!drm_core_check_feature(dev, DRIVER_MODESET))
28096 return -EINVAL;
28097 @@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
28098 blob = obj_to_blob(obj);
28099
28100 if (out_resp->length == blob->length) {
28101 - blob_ptr = (void *)(unsigned long)out_resp->data;
28102 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
28103 if (copy_to_user(blob_ptr, blob->data, blob->length)){
28104 ret = -EFAULT;
28105 goto done;
28106 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
28107 index d2619d7..bd6bd00 100644
28108 --- a/drivers/gpu/drm/drm_crtc_helper.c
28109 +++ b/drivers/gpu/drm/drm_crtc_helper.c
28110 @@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
28111 struct drm_crtc *tmp;
28112 int crtc_mask = 1;
28113
28114 - WARN(!crtc, "checking null crtc?\n");
28115 + BUG_ON(!crtc);
28116
28117 dev = crtc->dev;
28118
28119 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
28120 index 40c187c..5746164 100644
28121 --- a/drivers/gpu/drm/drm_drv.c
28122 +++ b/drivers/gpu/drm/drm_drv.c
28123 @@ -308,7 +308,7 @@ module_exit(drm_core_exit);
28124 /**
28125 * Copy and IOCTL return string to user space
28126 */
28127 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
28128 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
28129 {
28130 int len;
28131
28132 @@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
28133
28134 dev = file_priv->minor->dev;
28135 atomic_inc(&dev->ioctl_count);
28136 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
28137 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
28138 ++file_priv->ioctl_count;
28139
28140 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
28141 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
28142 index 828bf65..cdaa0e9 100644
28143 --- a/drivers/gpu/drm/drm_fops.c
28144 +++ b/drivers/gpu/drm/drm_fops.c
28145 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
28146 }
28147
28148 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
28149 - atomic_set(&dev->counts[i], 0);
28150 + atomic_set_unchecked(&dev->counts[i], 0);
28151
28152 dev->sigdata.lock = NULL;
28153
28154 @@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
28155
28156 retcode = drm_open_helper(inode, filp, dev);
28157 if (!retcode) {
28158 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
28159 - if (!dev->open_count++)
28160 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
28161 + if (local_inc_return(&dev->open_count) == 1)
28162 retcode = drm_setup(dev);
28163 }
28164 if (!retcode) {
28165 @@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
28166
28167 mutex_lock(&drm_global_mutex);
28168
28169 - DRM_DEBUG("open_count = %d\n", dev->open_count);
28170 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
28171
28172 if (dev->driver->preclose)
28173 dev->driver->preclose(dev, file_priv);
28174 @@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
28175 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
28176 task_pid_nr(current),
28177 (long)old_encode_dev(file_priv->minor->device),
28178 - dev->open_count);
28179 + local_read(&dev->open_count));
28180
28181 /* Release any auth tokens that might point to this file_priv,
28182 (do that under the drm_global_mutex) */
28183 @@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
28184 * End inline drm_release
28185 */
28186
28187 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
28188 - if (!--dev->open_count) {
28189 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
28190 + if (local_dec_and_test(&dev->open_count)) {
28191 if (atomic_read(&dev->ioctl_count)) {
28192 DRM_ERROR("Device busy: %d\n",
28193 atomic_read(&dev->ioctl_count));
28194 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
28195 index c87dc96..326055d 100644
28196 --- a/drivers/gpu/drm/drm_global.c
28197 +++ b/drivers/gpu/drm/drm_global.c
28198 @@ -36,7 +36,7 @@
28199 struct drm_global_item {
28200 struct mutex mutex;
28201 void *object;
28202 - int refcount;
28203 + atomic_t refcount;
28204 };
28205
28206 static struct drm_global_item glob[DRM_GLOBAL_NUM];
28207 @@ -49,7 +49,7 @@ void drm_global_init(void)
28208 struct drm_global_item *item = &glob[i];
28209 mutex_init(&item->mutex);
28210 item->object = NULL;
28211 - item->refcount = 0;
28212 + atomic_set(&item->refcount, 0);
28213 }
28214 }
28215
28216 @@ -59,7 +59,7 @@ void drm_global_release(void)
28217 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
28218 struct drm_global_item *item = &glob[i];
28219 BUG_ON(item->object != NULL);
28220 - BUG_ON(item->refcount != 0);
28221 + BUG_ON(atomic_read(&item->refcount) != 0);
28222 }
28223 }
28224
28225 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28226 void *object;
28227
28228 mutex_lock(&item->mutex);
28229 - if (item->refcount == 0) {
28230 + if (atomic_read(&item->refcount) == 0) {
28231 item->object = kzalloc(ref->size, GFP_KERNEL);
28232 if (unlikely(item->object == NULL)) {
28233 ret = -ENOMEM;
28234 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28235 goto out_err;
28236
28237 }
28238 - ++item->refcount;
28239 + atomic_inc(&item->refcount);
28240 ref->object = item->object;
28241 object = item->object;
28242 mutex_unlock(&item->mutex);
28243 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
28244 struct drm_global_item *item = &glob[ref->global_type];
28245
28246 mutex_lock(&item->mutex);
28247 - BUG_ON(item->refcount == 0);
28248 + BUG_ON(atomic_read(&item->refcount) == 0);
28249 BUG_ON(ref->object != item->object);
28250 - if (--item->refcount == 0) {
28251 + if (atomic_dec_and_test(&item->refcount)) {
28252 ref->release(ref);
28253 item->object = NULL;
28254 }
28255 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
28256 index ab1162d..42587b2 100644
28257 --- a/drivers/gpu/drm/drm_info.c
28258 +++ b/drivers/gpu/drm/drm_info.c
28259 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
28260 struct drm_local_map *map;
28261 struct drm_map_list *r_list;
28262
28263 - /* Hardcoded from _DRM_FRAME_BUFFER,
28264 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28265 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28266 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28267 + static const char * const types[] = {
28268 + [_DRM_FRAME_BUFFER] = "FB",
28269 + [_DRM_REGISTERS] = "REG",
28270 + [_DRM_SHM] = "SHM",
28271 + [_DRM_AGP] = "AGP",
28272 + [_DRM_SCATTER_GATHER] = "SG",
28273 + [_DRM_CONSISTENT] = "PCI",
28274 + [_DRM_GEM] = "GEM" };
28275 const char *type;
28276 int i;
28277
28278 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
28279 map = r_list->map;
28280 if (!map)
28281 continue;
28282 - if (map->type < 0 || map->type > 5)
28283 + if (map->type >= ARRAY_SIZE(types))
28284 type = "??";
28285 else
28286 type = types[map->type];
28287 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
28288 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28289 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28290 vma->vm_flags & VM_IO ? 'i' : '-',
28291 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28292 + 0);
28293 +#else
28294 vma->vm_pgoff);
28295 +#endif
28296
28297 #if defined(__i386__)
28298 pgprot = pgprot_val(vma->vm_page_prot);
28299 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28300 index ddd70db..40321e6 100644
28301 --- a/drivers/gpu/drm/drm_ioc32.c
28302 +++ b/drivers/gpu/drm/drm_ioc32.c
28303 @@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
28304 request = compat_alloc_user_space(nbytes);
28305 if (!access_ok(VERIFY_WRITE, request, nbytes))
28306 return -EFAULT;
28307 - list = (struct drm_buf_desc *) (request + 1);
28308 + list = (struct drm_buf_desc __user *) (request + 1);
28309
28310 if (__put_user(count, &request->count)
28311 || __put_user(list, &request->list))
28312 @@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
28313 request = compat_alloc_user_space(nbytes);
28314 if (!access_ok(VERIFY_WRITE, request, nbytes))
28315 return -EFAULT;
28316 - list = (struct drm_buf_pub *) (request + 1);
28317 + list = (struct drm_buf_pub __user *) (request + 1);
28318
28319 if (__put_user(count, &request->count)
28320 || __put_user(list, &request->list))
28321 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28322 index 904d7e9..ab88581 100644
28323 --- a/drivers/gpu/drm/drm_ioctl.c
28324 +++ b/drivers/gpu/drm/drm_ioctl.c
28325 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28326 stats->data[i].value =
28327 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28328 else
28329 - stats->data[i].value = atomic_read(&dev->counts[i]);
28330 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28331 stats->data[i].type = dev->types[i];
28332 }
28333
28334 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28335 index 632ae24..244cf4a 100644
28336 --- a/drivers/gpu/drm/drm_lock.c
28337 +++ b/drivers/gpu/drm/drm_lock.c
28338 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28339 if (drm_lock_take(&master->lock, lock->context)) {
28340 master->lock.file_priv = file_priv;
28341 master->lock.lock_time = jiffies;
28342 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28343 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28344 break; /* Got lock */
28345 }
28346
28347 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28348 return -EINVAL;
28349 }
28350
28351 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28352 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28353
28354 if (drm_lock_free(&master->lock, lock->context)) {
28355 /* FIXME: Should really bail out here. */
28356 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28357 index 8f371e8..9f85d52 100644
28358 --- a/drivers/gpu/drm/i810/i810_dma.c
28359 +++ b/drivers/gpu/drm/i810/i810_dma.c
28360 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28361 dma->buflist[vertex->idx],
28362 vertex->discard, vertex->used);
28363
28364 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28365 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28366 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28367 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28368 sarea_priv->last_enqueue = dev_priv->counter - 1;
28369 sarea_priv->last_dispatch = (int)hw_status[5];
28370
28371 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28372 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28373 mc->last_render);
28374
28375 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28376 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28377 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28378 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28379 sarea_priv->last_enqueue = dev_priv->counter - 1;
28380 sarea_priv->last_dispatch = (int)hw_status[5];
28381
28382 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28383 index c9339f4..f5e1b9d 100644
28384 --- a/drivers/gpu/drm/i810/i810_drv.h
28385 +++ b/drivers/gpu/drm/i810/i810_drv.h
28386 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28387 int page_flipping;
28388
28389 wait_queue_head_t irq_queue;
28390 - atomic_t irq_received;
28391 - atomic_t irq_emitted;
28392 + atomic_unchecked_t irq_received;
28393 + atomic_unchecked_t irq_emitted;
28394
28395 int front_offset;
28396 } drm_i810_private_t;
28397 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28398 index b2e3c97..58cf079 100644
28399 --- a/drivers/gpu/drm/i915/i915_debugfs.c
28400 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
28401 @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28402 I915_READ(GTIMR));
28403 }
28404 seq_printf(m, "Interrupts received: %d\n",
28405 - atomic_read(&dev_priv->irq_received));
28406 + atomic_read_unchecked(&dev_priv->irq_received));
28407 for (i = 0; i < I915_NUM_RINGS; i++) {
28408 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28409 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28410 @@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28411 return ret;
28412
28413 if (opregion->header)
28414 - seq_write(m, opregion->header, OPREGION_SIZE);
28415 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28416
28417 mutex_unlock(&dev->struct_mutex);
28418
28419 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28420 index c4da951..3c59c5c 100644
28421 --- a/drivers/gpu/drm/i915/i915_dma.c
28422 +++ b/drivers/gpu/drm/i915/i915_dma.c
28423 @@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28424 bool can_switch;
28425
28426 spin_lock(&dev->count_lock);
28427 - can_switch = (dev->open_count == 0);
28428 + can_switch = (local_read(&dev->open_count) == 0);
28429 spin_unlock(&dev->count_lock);
28430 return can_switch;
28431 }
28432 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28433 index ae294a0..1755461 100644
28434 --- a/drivers/gpu/drm/i915/i915_drv.h
28435 +++ b/drivers/gpu/drm/i915/i915_drv.h
28436 @@ -229,7 +229,7 @@ struct drm_i915_display_funcs {
28437 /* render clock increase/decrease */
28438 /* display clock increase/decrease */
28439 /* pll clock increase/decrease */
28440 -};
28441 +} __no_const;
28442
28443 struct intel_device_info {
28444 u8 gen;
28445 @@ -318,7 +318,7 @@ typedef struct drm_i915_private {
28446 int current_page;
28447 int page_flipping;
28448
28449 - atomic_t irq_received;
28450 + atomic_unchecked_t irq_received;
28451
28452 /* protects the irq masks */
28453 spinlock_t irq_lock;
28454 @@ -893,7 +893,7 @@ struct drm_i915_gem_object {
28455 * will be page flipped away on the next vblank. When it
28456 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28457 */
28458 - atomic_t pending_flip;
28459 + atomic_unchecked_t pending_flip;
28460 };
28461
28462 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28463 @@ -1273,7 +1273,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28464 extern void intel_teardown_gmbus(struct drm_device *dev);
28465 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28466 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28467 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28468 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28469 {
28470 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28471 }
28472 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28473 index b9da890..cad1d98 100644
28474 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28475 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28476 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28477 i915_gem_clflush_object(obj);
28478
28479 if (obj->base.pending_write_domain)
28480 - cd->flips |= atomic_read(&obj->pending_flip);
28481 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28482
28483 /* The actual obj->write_domain will be updated with
28484 * pending_write_domain after we emit the accumulated flush for all
28485 @@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28486
28487 static int
28488 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28489 - int count)
28490 + unsigned int count)
28491 {
28492 - int i;
28493 + unsigned int i;
28494
28495 for (i = 0; i < count; i++) {
28496 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28497 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28498 index d47a53b..61154c2 100644
28499 --- a/drivers/gpu/drm/i915/i915_irq.c
28500 +++ b/drivers/gpu/drm/i915/i915_irq.c
28501 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28502 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28503 struct drm_i915_master_private *master_priv;
28504
28505 - atomic_inc(&dev_priv->irq_received);
28506 + atomic_inc_unchecked(&dev_priv->irq_received);
28507
28508 /* disable master interrupt before clearing iir */
28509 de_ier = I915_READ(DEIER);
28510 @@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28511 struct drm_i915_master_private *master_priv;
28512 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28513
28514 - atomic_inc(&dev_priv->irq_received);
28515 + atomic_inc_unchecked(&dev_priv->irq_received);
28516
28517 if (IS_GEN6(dev))
28518 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28519 @@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28520 int ret = IRQ_NONE, pipe;
28521 bool blc_event = false;
28522
28523 - atomic_inc(&dev_priv->irq_received);
28524 + atomic_inc_unchecked(&dev_priv->irq_received);
28525
28526 iir = I915_READ(IIR);
28527
28528 @@ -1750,7 +1750,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28529 {
28530 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28531
28532 - atomic_set(&dev_priv->irq_received, 0);
28533 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28534
28535 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28536 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28537 @@ -1938,7 +1938,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28538 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28539 int pipe;
28540
28541 - atomic_set(&dev_priv->irq_received, 0);
28542 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28543
28544 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28545 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28546 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28547 index daa5743..c0757a9 100644
28548 --- a/drivers/gpu/drm/i915/intel_display.c
28549 +++ b/drivers/gpu/drm/i915/intel_display.c
28550 @@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28551
28552 wait_event(dev_priv->pending_flip_queue,
28553 atomic_read(&dev_priv->mm.wedged) ||
28554 - atomic_read(&obj->pending_flip) == 0);
28555 + atomic_read_unchecked(&obj->pending_flip) == 0);
28556
28557 /* Big Hammer, we also need to ensure that any pending
28558 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28559 @@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28560 obj = to_intel_framebuffer(crtc->fb)->obj;
28561 dev_priv = crtc->dev->dev_private;
28562 wait_event(dev_priv->pending_flip_queue,
28563 - atomic_read(&obj->pending_flip) == 0);
28564 + atomic_read_unchecked(&obj->pending_flip) == 0);
28565 }
28566
28567 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28568 @@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28569
28570 atomic_clear_mask(1 << intel_crtc->plane,
28571 &obj->pending_flip.counter);
28572 - if (atomic_read(&obj->pending_flip) == 0)
28573 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
28574 wake_up(&dev_priv->pending_flip_queue);
28575
28576 schedule_work(&work->work);
28577 @@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28578 /* Block clients from rendering to the new back buffer until
28579 * the flip occurs and the object is no longer visible.
28580 */
28581 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28582 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28583
28584 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28585 if (ret)
28586 @@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28587 return 0;
28588
28589 cleanup_pending:
28590 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28591 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28592 drm_gem_object_unreference(&work->old_fb_obj->base);
28593 drm_gem_object_unreference(&obj->base);
28594 mutex_unlock(&dev->struct_mutex);
28595 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28596 index 54558a0..2d97005 100644
28597 --- a/drivers/gpu/drm/mga/mga_drv.h
28598 +++ b/drivers/gpu/drm/mga/mga_drv.h
28599 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28600 u32 clear_cmd;
28601 u32 maccess;
28602
28603 - atomic_t vbl_received; /**< Number of vblanks received. */
28604 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28605 wait_queue_head_t fence_queue;
28606 - atomic_t last_fence_retired;
28607 + atomic_unchecked_t last_fence_retired;
28608 u32 next_fence_to_post;
28609
28610 unsigned int fb_cpp;
28611 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28612 index 2581202..f230a8d9 100644
28613 --- a/drivers/gpu/drm/mga/mga_irq.c
28614 +++ b/drivers/gpu/drm/mga/mga_irq.c
28615 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28616 if (crtc != 0)
28617 return 0;
28618
28619 - return atomic_read(&dev_priv->vbl_received);
28620 + return atomic_read_unchecked(&dev_priv->vbl_received);
28621 }
28622
28623
28624 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28625 /* VBLANK interrupt */
28626 if (status & MGA_VLINEPEN) {
28627 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28628 - atomic_inc(&dev_priv->vbl_received);
28629 + atomic_inc_unchecked(&dev_priv->vbl_received);
28630 drm_handle_vblank(dev, 0);
28631 handled = 1;
28632 }
28633 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28634 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28635 MGA_WRITE(MGA_PRIMEND, prim_end);
28636
28637 - atomic_inc(&dev_priv->last_fence_retired);
28638 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28639 DRM_WAKEUP(&dev_priv->fence_queue);
28640 handled = 1;
28641 }
28642 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28643 * using fences.
28644 */
28645 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28646 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28647 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28648 - *sequence) <= (1 << 23)));
28649
28650 *sequence = cur_fence;
28651 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28652 index 5fc201b..7b032b9 100644
28653 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28654 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28655 @@ -201,7 +201,7 @@ struct methods {
28656 const char desc[8];
28657 void (*loadbios)(struct drm_device *, uint8_t *);
28658 const bool rw;
28659 -};
28660 +} __do_const;
28661
28662 static struct methods shadow_methods[] = {
28663 { "PRAMIN", load_vbios_pramin, true },
28664 @@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28665 struct bit_table {
28666 const char id;
28667 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28668 -};
28669 +} __no_const;
28670
28671 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28672
28673 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28674 index 4c0be3a..5757582 100644
28675 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28676 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28677 @@ -238,7 +238,7 @@ struct nouveau_channel {
28678 struct list_head pending;
28679 uint32_t sequence;
28680 uint32_t sequence_ack;
28681 - atomic_t last_sequence_irq;
28682 + atomic_unchecked_t last_sequence_irq;
28683 struct nouveau_vma vma;
28684 } fence;
28685
28686 @@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28687 u32 handle, u16 class);
28688 void (*set_tile_region)(struct drm_device *dev, int i);
28689 void (*tlb_flush)(struct drm_device *, int engine);
28690 -};
28691 +} __no_const;
28692
28693 struct nouveau_instmem_engine {
28694 void *priv;
28695 @@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28696 struct nouveau_mc_engine {
28697 int (*init)(struct drm_device *dev);
28698 void (*takedown)(struct drm_device *dev);
28699 -};
28700 +} __no_const;
28701
28702 struct nouveau_timer_engine {
28703 int (*init)(struct drm_device *dev);
28704 void (*takedown)(struct drm_device *dev);
28705 uint64_t (*read)(struct drm_device *dev);
28706 -};
28707 +} __no_const;
28708
28709 struct nouveau_fb_engine {
28710 int num_tiles;
28711 @@ -558,7 +558,7 @@ struct nouveau_vram_engine {
28712 void (*put)(struct drm_device *, struct nouveau_mem **);
28713
28714 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28715 -};
28716 +} __no_const;
28717
28718 struct nouveau_engine {
28719 struct nouveau_instmem_engine instmem;
28720 @@ -706,7 +706,7 @@ struct drm_nouveau_private {
28721 struct drm_global_reference mem_global_ref;
28722 struct ttm_bo_global_ref bo_global_ref;
28723 struct ttm_bo_device bdev;
28724 - atomic_t validate_sequence;
28725 + atomic_unchecked_t validate_sequence;
28726 } ttm;
28727
28728 struct {
28729 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28730 index 2f6daae..c9d7b9e 100644
28731 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28732 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28733 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28734 if (USE_REFCNT(dev))
28735 sequence = nvchan_rd32(chan, 0x48);
28736 else
28737 - sequence = atomic_read(&chan->fence.last_sequence_irq);
28738 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28739
28740 if (chan->fence.sequence_ack == sequence)
28741 goto out;
28742 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28743 return ret;
28744 }
28745
28746 - atomic_set(&chan->fence.last_sequence_irq, 0);
28747 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28748 return 0;
28749 }
28750
28751 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28752 index 7ce3fde..cb3ea04 100644
28753 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28754 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28755 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28756 int trycnt = 0;
28757 int ret, i;
28758
28759 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28760 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28761 retry:
28762 if (++trycnt > 100000) {
28763 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28764 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28765 index d8831ab..0ba8356 100644
28766 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
28767 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28768 @@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28769 bool can_switch;
28770
28771 spin_lock(&dev->count_lock);
28772 - can_switch = (dev->open_count == 0);
28773 + can_switch = (local_read(&dev->open_count) == 0);
28774 spin_unlock(&dev->count_lock);
28775 return can_switch;
28776 }
28777 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28778 index dbdea8e..cd6eeeb 100644
28779 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
28780 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28781 @@ -554,7 +554,7 @@ static int
28782 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28783 u32 class, u32 mthd, u32 data)
28784 {
28785 - atomic_set(&chan->fence.last_sequence_irq, data);
28786 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28787 return 0;
28788 }
28789
28790 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28791 index bcac90b..53bfc76 100644
28792 --- a/drivers/gpu/drm/r128/r128_cce.c
28793 +++ b/drivers/gpu/drm/r128/r128_cce.c
28794 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28795
28796 /* GH: Simple idle check.
28797 */
28798 - atomic_set(&dev_priv->idle_count, 0);
28799 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28800
28801 /* We don't support anything other than bus-mastering ring mode,
28802 * but the ring can be in either AGP or PCI space for the ring
28803 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28804 index 930c71b..499aded 100644
28805 --- a/drivers/gpu/drm/r128/r128_drv.h
28806 +++ b/drivers/gpu/drm/r128/r128_drv.h
28807 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28808 int is_pci;
28809 unsigned long cce_buffers_offset;
28810
28811 - atomic_t idle_count;
28812 + atomic_unchecked_t idle_count;
28813
28814 int page_flipping;
28815 int current_page;
28816 u32 crtc_offset;
28817 u32 crtc_offset_cntl;
28818
28819 - atomic_t vbl_received;
28820 + atomic_unchecked_t vbl_received;
28821
28822 u32 color_fmt;
28823 unsigned int front_offset;
28824 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28825 index 429d5a0..7e899ed 100644
28826 --- a/drivers/gpu/drm/r128/r128_irq.c
28827 +++ b/drivers/gpu/drm/r128/r128_irq.c
28828 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28829 if (crtc != 0)
28830 return 0;
28831
28832 - return atomic_read(&dev_priv->vbl_received);
28833 + return atomic_read_unchecked(&dev_priv->vbl_received);
28834 }
28835
28836 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28837 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28838 /* VBLANK interrupt */
28839 if (status & R128_CRTC_VBLANK_INT) {
28840 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28841 - atomic_inc(&dev_priv->vbl_received);
28842 + atomic_inc_unchecked(&dev_priv->vbl_received);
28843 drm_handle_vblank(dev, 0);
28844 return IRQ_HANDLED;
28845 }
28846 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28847 index a9e33ce..09edd4b 100644
28848 --- a/drivers/gpu/drm/r128/r128_state.c
28849 +++ b/drivers/gpu/drm/r128/r128_state.c
28850 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28851
28852 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28853 {
28854 - if (atomic_read(&dev_priv->idle_count) == 0)
28855 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28856 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28857 else
28858 - atomic_set(&dev_priv->idle_count, 0);
28859 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28860 }
28861
28862 #endif
28863 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28864 index 5a82b6b..9e69c73 100644
28865 --- a/drivers/gpu/drm/radeon/mkregtable.c
28866 +++ b/drivers/gpu/drm/radeon/mkregtable.c
28867 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28868 regex_t mask_rex;
28869 regmatch_t match[4];
28870 char buf[1024];
28871 - size_t end;
28872 + long end;
28873 int len;
28874 int done = 0;
28875 int r;
28876 unsigned o;
28877 struct offset *offset;
28878 char last_reg_s[10];
28879 - int last_reg;
28880 + unsigned long last_reg;
28881
28882 if (regcomp
28883 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28884 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28885 index 8227e76..ce0b195 100644
28886 --- a/drivers/gpu/drm/radeon/radeon.h
28887 +++ b/drivers/gpu/drm/radeon/radeon.h
28888 @@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28889 */
28890 struct radeon_fence_driver {
28891 uint32_t scratch_reg;
28892 - atomic_t seq;
28893 + atomic_unchecked_t seq;
28894 uint32_t last_seq;
28895 unsigned long last_jiffies;
28896 unsigned long last_timeout;
28897 @@ -530,7 +530,7 @@ struct r600_blit_cp_primitives {
28898 int x2, int y2);
28899 void (*draw_auto)(struct radeon_device *rdev);
28900 void (*set_default_state)(struct radeon_device *rdev);
28901 -};
28902 +} __no_const;
28903
28904 struct r600_blit {
28905 struct mutex mutex;
28906 @@ -954,7 +954,7 @@ struct radeon_asic {
28907 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28908 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28909 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28910 -};
28911 +} __no_const;
28912
28913 /*
28914 * Asic structures
28915 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28916 index 9231564..78b00fd 100644
28917 --- a/drivers/gpu/drm/radeon/radeon_device.c
28918 +++ b/drivers/gpu/drm/radeon/radeon_device.c
28919 @@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28920 bool can_switch;
28921
28922 spin_lock(&dev->count_lock);
28923 - can_switch = (dev->open_count == 0);
28924 + can_switch = (local_read(&dev->open_count) == 0);
28925 spin_unlock(&dev->count_lock);
28926 return can_switch;
28927 }
28928 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28929 index a1b59ca..86f2d44 100644
28930 --- a/drivers/gpu/drm/radeon/radeon_drv.h
28931 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
28932 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28933
28934 /* SW interrupt */
28935 wait_queue_head_t swi_queue;
28936 - atomic_t swi_emitted;
28937 + atomic_unchecked_t swi_emitted;
28938 int vblank_crtc;
28939 uint32_t irq_enable_reg;
28940 uint32_t r500_disp_irq_reg;
28941 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28942 index 76ec0e9..6feb1a3 100644
28943 --- a/drivers/gpu/drm/radeon/radeon_fence.c
28944 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
28945 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28946 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28947 return 0;
28948 }
28949 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28950 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28951 if (!rdev->cp.ready)
28952 /* FIXME: cp is not running assume everythings is done right
28953 * away
28954 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28955 return r;
28956 }
28957 radeon_fence_write(rdev, 0);
28958 - atomic_set(&rdev->fence_drv.seq, 0);
28959 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28960 INIT_LIST_HEAD(&rdev->fence_drv.created);
28961 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28962 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28963 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28964 index 48b7cea..342236f 100644
28965 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28966 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28967 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28968 request = compat_alloc_user_space(sizeof(*request));
28969 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28970 || __put_user(req32.param, &request->param)
28971 - || __put_user((void __user *)(unsigned long)req32.value,
28972 + || __put_user((unsigned long)req32.value,
28973 &request->value))
28974 return -EFAULT;
28975
28976 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28977 index 00da384..32f972d 100644
28978 --- a/drivers/gpu/drm/radeon/radeon_irq.c
28979 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
28980 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28981 unsigned int ret;
28982 RING_LOCALS;
28983
28984 - atomic_inc(&dev_priv->swi_emitted);
28985 - ret = atomic_read(&dev_priv->swi_emitted);
28986 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28987 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28988
28989 BEGIN_RING(4);
28990 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28991 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28992 drm_radeon_private_t *dev_priv =
28993 (drm_radeon_private_t *) dev->dev_private;
28994
28995 - atomic_set(&dev_priv->swi_emitted, 0);
28996 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28997 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28998
28999 dev->max_vblank_count = 0x001fffff;
29000 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
29001 index e8422ae..d22d4a8 100644
29002 --- a/drivers/gpu/drm/radeon/radeon_state.c
29003 +++ b/drivers/gpu/drm/radeon/radeon_state.c
29004 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
29005 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
29006 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
29007
29008 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
29009 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
29010 sarea_priv->nbox * sizeof(depth_boxes[0])))
29011 return -EFAULT;
29012
29013 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
29014 {
29015 drm_radeon_private_t *dev_priv = dev->dev_private;
29016 drm_radeon_getparam_t *param = data;
29017 - int value;
29018 + int value = 0;
29019
29020 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
29021
29022 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
29023 index 0b5468b..9c4b308 100644
29024 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
29025 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
29026 @@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
29027 }
29028 if (unlikely(ttm_vm_ops == NULL)) {
29029 ttm_vm_ops = vma->vm_ops;
29030 - radeon_ttm_vm_ops = *ttm_vm_ops;
29031 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29032 + pax_open_kernel();
29033 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
29034 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29035 + pax_close_kernel();
29036 }
29037 vma->vm_ops = &radeon_ttm_vm_ops;
29038 return 0;
29039 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
29040 index a9049ed..501f284 100644
29041 --- a/drivers/gpu/drm/radeon/rs690.c
29042 +++ b/drivers/gpu/drm/radeon/rs690.c
29043 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
29044 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
29045 rdev->pm.sideport_bandwidth.full)
29046 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
29047 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
29048 + read_delay_latency.full = dfixed_const(800 * 1000);
29049 read_delay_latency.full = dfixed_div(read_delay_latency,
29050 rdev->pm.igp_sideport_mclk);
29051 + a.full = dfixed_const(370);
29052 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
29053 } else {
29054 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
29055 rdev->pm.k8_bandwidth.full)
29056 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
29057 index 727e93d..1565650 100644
29058 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
29059 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
29060 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
29061 static int ttm_pool_mm_shrink(struct shrinker *shrink,
29062 struct shrink_control *sc)
29063 {
29064 - static atomic_t start_pool = ATOMIC_INIT(0);
29065 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
29066 unsigned i;
29067 - unsigned pool_offset = atomic_add_return(1, &start_pool);
29068 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
29069 struct ttm_page_pool *pool;
29070 int shrink_pages = sc->nr_to_scan;
29071
29072 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
29073 index 9cf87d9..2000b7d 100644
29074 --- a/drivers/gpu/drm/via/via_drv.h
29075 +++ b/drivers/gpu/drm/via/via_drv.h
29076 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
29077 typedef uint32_t maskarray_t[5];
29078
29079 typedef struct drm_via_irq {
29080 - atomic_t irq_received;
29081 + atomic_unchecked_t irq_received;
29082 uint32_t pending_mask;
29083 uint32_t enable_mask;
29084 wait_queue_head_t irq_queue;
29085 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
29086 struct timeval last_vblank;
29087 int last_vblank_valid;
29088 unsigned usec_per_vblank;
29089 - atomic_t vbl_received;
29090 + atomic_unchecked_t vbl_received;
29091 drm_via_state_t hc_state;
29092 char pci_buf[VIA_PCI_BUF_SIZE];
29093 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
29094 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
29095 index d391f48..10c8ca3 100644
29096 --- a/drivers/gpu/drm/via/via_irq.c
29097 +++ b/drivers/gpu/drm/via/via_irq.c
29098 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
29099 if (crtc != 0)
29100 return 0;
29101
29102 - return atomic_read(&dev_priv->vbl_received);
29103 + return atomic_read_unchecked(&dev_priv->vbl_received);
29104 }
29105
29106 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29107 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29108
29109 status = VIA_READ(VIA_REG_INTERRUPT);
29110 if (status & VIA_IRQ_VBLANK_PENDING) {
29111 - atomic_inc(&dev_priv->vbl_received);
29112 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
29113 + atomic_inc_unchecked(&dev_priv->vbl_received);
29114 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
29115 do_gettimeofday(&cur_vblank);
29116 if (dev_priv->last_vblank_valid) {
29117 dev_priv->usec_per_vblank =
29118 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29119 dev_priv->last_vblank = cur_vblank;
29120 dev_priv->last_vblank_valid = 1;
29121 }
29122 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
29123 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
29124 DRM_DEBUG("US per vblank is: %u\n",
29125 dev_priv->usec_per_vblank);
29126 }
29127 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29128
29129 for (i = 0; i < dev_priv->num_irqs; ++i) {
29130 if (status & cur_irq->pending_mask) {
29131 - atomic_inc(&cur_irq->irq_received);
29132 + atomic_inc_unchecked(&cur_irq->irq_received);
29133 DRM_WAKEUP(&cur_irq->irq_queue);
29134 handled = 1;
29135 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
29136 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
29137 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29138 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
29139 masks[irq][4]));
29140 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
29141 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
29142 } else {
29143 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29144 (((cur_irq_sequence =
29145 - atomic_read(&cur_irq->irq_received)) -
29146 + atomic_read_unchecked(&cur_irq->irq_received)) -
29147 *sequence) <= (1 << 23)));
29148 }
29149 *sequence = cur_irq_sequence;
29150 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
29151 }
29152
29153 for (i = 0; i < dev_priv->num_irqs; ++i) {
29154 - atomic_set(&cur_irq->irq_received, 0);
29155 + atomic_set_unchecked(&cur_irq->irq_received, 0);
29156 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
29157 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
29158 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
29159 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
29160 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
29161 case VIA_IRQ_RELATIVE:
29162 irqwait->request.sequence +=
29163 - atomic_read(&cur_irq->irq_received);
29164 + atomic_read_unchecked(&cur_irq->irq_received);
29165 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
29166 case VIA_IRQ_ABSOLUTE:
29167 break;
29168 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29169 index dc27970..f18b008 100644
29170 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29171 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29172 @@ -260,7 +260,7 @@ struct vmw_private {
29173 * Fencing and IRQs.
29174 */
29175
29176 - atomic_t marker_seq;
29177 + atomic_unchecked_t marker_seq;
29178 wait_queue_head_t fence_queue;
29179 wait_queue_head_t fifo_queue;
29180 int fence_queue_waiters; /* Protected by hw_mutex */
29181 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29182 index a0c2f12..68ae6cb 100644
29183 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29184 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29185 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
29186 (unsigned int) min,
29187 (unsigned int) fifo->capabilities);
29188
29189 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
29190 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
29191 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
29192 vmw_marker_queue_init(&fifo->marker_queue);
29193 return vmw_fifo_send_fence(dev_priv, &dummy);
29194 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
29195 if (reserveable)
29196 iowrite32(bytes, fifo_mem +
29197 SVGA_FIFO_RESERVED);
29198 - return fifo_mem + (next_cmd >> 2);
29199 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
29200 } else {
29201 need_bounce = true;
29202 }
29203 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29204
29205 fm = vmw_fifo_reserve(dev_priv, bytes);
29206 if (unlikely(fm == NULL)) {
29207 - *seqno = atomic_read(&dev_priv->marker_seq);
29208 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29209 ret = -ENOMEM;
29210 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
29211 false, 3*HZ);
29212 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29213 }
29214
29215 do {
29216 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
29217 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
29218 } while (*seqno == 0);
29219
29220 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
29221 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29222 index cabc95f..14b3d77 100644
29223 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29224 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29225 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
29226 * emitted. Then the fence is stale and signaled.
29227 */
29228
29229 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
29230 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
29231 > VMW_FENCE_WRAP);
29232
29233 return ret;
29234 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
29235
29236 if (fifo_idle)
29237 down_read(&fifo_state->rwsem);
29238 - signal_seq = atomic_read(&dev_priv->marker_seq);
29239 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
29240 ret = 0;
29241
29242 for (;;) {
29243 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29244 index 8a8725c..afed796 100644
29245 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29246 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29247 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
29248 while (!vmw_lag_lt(queue, us)) {
29249 spin_lock(&queue->lock);
29250 if (list_empty(&queue->head))
29251 - seqno = atomic_read(&dev_priv->marker_seq);
29252 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29253 else {
29254 marker = list_first_entry(&queue->head,
29255 struct vmw_marker, head);
29256 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
29257 index bb656d8..4169fca 100644
29258 --- a/drivers/hid/hid-core.c
29259 +++ b/drivers/hid/hid-core.c
29260 @@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev)
29261
29262 int hid_add_device(struct hid_device *hdev)
29263 {
29264 - static atomic_t id = ATOMIC_INIT(0);
29265 + static atomic_unchecked_t id = ATOMIC_INIT(0);
29266 int ret;
29267
29268 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29269 @@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev)
29270 /* XXX hack, any other cleaner solution after the driver core
29271 * is converted to allow more than 20 bytes as the device name? */
29272 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29273 - hdev->vendor, hdev->product, atomic_inc_return(&id));
29274 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29275
29276 hid_debug_register(hdev, dev_name(&hdev->dev));
29277 ret = device_add(&hdev->dev);
29278 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
29279 index 4ef02b2..8a96831 100644
29280 --- a/drivers/hid/usbhid/hiddev.c
29281 +++ b/drivers/hid/usbhid/hiddev.c
29282 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
29283 break;
29284
29285 case HIDIOCAPPLICATION:
29286 - if (arg < 0 || arg >= hid->maxapplication)
29287 + if (arg >= hid->maxapplication)
29288 break;
29289
29290 for (i = 0; i < hid->maxcollection; i++)
29291 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
29292 index 4065374..10ed7dc 100644
29293 --- a/drivers/hv/channel.c
29294 +++ b/drivers/hv/channel.c
29295 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
29296 int ret = 0;
29297 int t;
29298
29299 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
29300 - atomic_inc(&vmbus_connection.next_gpadl_handle);
29301 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
29302 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
29303
29304 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
29305 if (ret)
29306 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
29307 index 0fb100e..baf87e5 100644
29308 --- a/drivers/hv/hv.c
29309 +++ b/drivers/hv/hv.c
29310 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
29311 u64 output_address = (output) ? virt_to_phys(output) : 0;
29312 u32 output_address_hi = output_address >> 32;
29313 u32 output_address_lo = output_address & 0xFFFFFFFF;
29314 - void *hypercall_page = hv_context.hypercall_page;
29315 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
29316
29317 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
29318 "=a"(hv_status_lo) : "d" (control_hi),
29319 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
29320 index 0aee112..b72d21f 100644
29321 --- a/drivers/hv/hyperv_vmbus.h
29322 +++ b/drivers/hv/hyperv_vmbus.h
29323 @@ -556,7 +556,7 @@ enum vmbus_connect_state {
29324 struct vmbus_connection {
29325 enum vmbus_connect_state conn_state;
29326
29327 - atomic_t next_gpadl_handle;
29328 + atomic_unchecked_t next_gpadl_handle;
29329
29330 /*
29331 * Represents channel interrupts. Each bit position represents a
29332 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
29333 index d2d0a2a..90b8f4d 100644
29334 --- a/drivers/hv/vmbus_drv.c
29335 +++ b/drivers/hv/vmbus_drv.c
29336 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
29337 {
29338 int ret = 0;
29339
29340 - static atomic_t device_num = ATOMIC_INIT(0);
29341 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
29342
29343 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
29344 - atomic_inc_return(&device_num));
29345 + atomic_inc_return_unchecked(&device_num));
29346
29347 child_device_obj->device.bus = &hv_bus;
29348 child_device_obj->device.parent = &hv_acpi_dev->dev;
29349 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29350 index 66f6729..2d6de0a 100644
29351 --- a/drivers/hwmon/acpi_power_meter.c
29352 +++ b/drivers/hwmon/acpi_power_meter.c
29353 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29354 return res;
29355
29356 temp /= 1000;
29357 - if (temp < 0)
29358 - return -EINVAL;
29359
29360 mutex_lock(&resource->lock);
29361 resource->trip[attr->index - 7] = temp;
29362 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29363 index 5357925..6cf0418 100644
29364 --- a/drivers/hwmon/sht15.c
29365 +++ b/drivers/hwmon/sht15.c
29366 @@ -166,7 +166,7 @@ struct sht15_data {
29367 int supply_uV;
29368 bool supply_uV_valid;
29369 struct work_struct update_supply_work;
29370 - atomic_t interrupt_handled;
29371 + atomic_unchecked_t interrupt_handled;
29372 };
29373
29374 /**
29375 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29376 return ret;
29377
29378 gpio_direction_input(data->pdata->gpio_data);
29379 - atomic_set(&data->interrupt_handled, 0);
29380 + atomic_set_unchecked(&data->interrupt_handled, 0);
29381
29382 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29383 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29384 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29385 /* Only relevant if the interrupt hasn't occurred. */
29386 - if (!atomic_read(&data->interrupt_handled))
29387 + if (!atomic_read_unchecked(&data->interrupt_handled))
29388 schedule_work(&data->read_work);
29389 }
29390 ret = wait_event_timeout(data->wait_queue,
29391 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29392
29393 /* First disable the interrupt */
29394 disable_irq_nosync(irq);
29395 - atomic_inc(&data->interrupt_handled);
29396 + atomic_inc_unchecked(&data->interrupt_handled);
29397 /* Then schedule a reading work struct */
29398 if (data->state != SHT15_READING_NOTHING)
29399 schedule_work(&data->read_work);
29400 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29401 * If not, then start the interrupt again - care here as could
29402 * have gone low in meantime so verify it hasn't!
29403 */
29404 - atomic_set(&data->interrupt_handled, 0);
29405 + atomic_set_unchecked(&data->interrupt_handled, 0);
29406 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29407 /* If still not occurred or another handler has been scheduled */
29408 if (gpio_get_value(data->pdata->gpio_data)
29409 - || atomic_read(&data->interrupt_handled))
29410 + || atomic_read_unchecked(&data->interrupt_handled))
29411 return;
29412 }
29413
29414 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29415 index 378fcb5..5e91fa8 100644
29416 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
29417 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29418 @@ -43,7 +43,7 @@
29419 extern struct i2c_adapter amd756_smbus;
29420
29421 static struct i2c_adapter *s4882_adapter;
29422 -static struct i2c_algorithm *s4882_algo;
29423 +static i2c_algorithm_no_const *s4882_algo;
29424
29425 /* Wrapper access functions for multiplexed SMBus */
29426 static DEFINE_MUTEX(amd756_lock);
29427 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29428 index 29015eb..af2d8e9 100644
29429 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29430 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29431 @@ -41,7 +41,7 @@
29432 extern struct i2c_adapter *nforce2_smbus;
29433
29434 static struct i2c_adapter *s4985_adapter;
29435 -static struct i2c_algorithm *s4985_algo;
29436 +static i2c_algorithm_no_const *s4985_algo;
29437
29438 /* Wrapper access functions for multiplexed SMBus */
29439 static DEFINE_MUTEX(nforce2_lock);
29440 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29441 index d7a4833..7fae376 100644
29442 --- a/drivers/i2c/i2c-mux.c
29443 +++ b/drivers/i2c/i2c-mux.c
29444 @@ -28,7 +28,7 @@
29445 /* multiplexer per channel data */
29446 struct i2c_mux_priv {
29447 struct i2c_adapter adap;
29448 - struct i2c_algorithm algo;
29449 + i2c_algorithm_no_const algo;
29450
29451 struct i2c_adapter *parent;
29452 void *mux_dev; /* the mux chip/device */
29453 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29454 index 57d00ca..0145194 100644
29455 --- a/drivers/ide/aec62xx.c
29456 +++ b/drivers/ide/aec62xx.c
29457 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29458 .cable_detect = atp86x_cable_detect,
29459 };
29460
29461 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29462 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29463 { /* 0: AEC6210 */
29464 .name = DRV_NAME,
29465 .init_chipset = init_chipset_aec62xx,
29466 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29467 index 2c8016a..911a27c 100644
29468 --- a/drivers/ide/alim15x3.c
29469 +++ b/drivers/ide/alim15x3.c
29470 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29471 .dma_sff_read_status = ide_dma_sff_read_status,
29472 };
29473
29474 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
29475 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
29476 .name = DRV_NAME,
29477 .init_chipset = init_chipset_ali15x3,
29478 .init_hwif = init_hwif_ali15x3,
29479 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29480 index 3747b25..56fc995 100644
29481 --- a/drivers/ide/amd74xx.c
29482 +++ b/drivers/ide/amd74xx.c
29483 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29484 .udma_mask = udma, \
29485 }
29486
29487 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29488 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29489 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29490 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29491 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29492 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29493 index 15f0ead..cb43480 100644
29494 --- a/drivers/ide/atiixp.c
29495 +++ b/drivers/ide/atiixp.c
29496 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29497 .cable_detect = atiixp_cable_detect,
29498 };
29499
29500 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29501 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29502 { /* 0: IXP200/300/400/700 */
29503 .name = DRV_NAME,
29504 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29505 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29506 index 5f80312..d1fc438 100644
29507 --- a/drivers/ide/cmd64x.c
29508 +++ b/drivers/ide/cmd64x.c
29509 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29510 .dma_sff_read_status = ide_dma_sff_read_status,
29511 };
29512
29513 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29514 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29515 { /* 0: CMD643 */
29516 .name = DRV_NAME,
29517 .init_chipset = init_chipset_cmd64x,
29518 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29519 index 2c1e5f7..1444762 100644
29520 --- a/drivers/ide/cs5520.c
29521 +++ b/drivers/ide/cs5520.c
29522 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29523 .set_dma_mode = cs5520_set_dma_mode,
29524 };
29525
29526 -static const struct ide_port_info cyrix_chipset __devinitdata = {
29527 +static const struct ide_port_info cyrix_chipset __devinitconst = {
29528 .name = DRV_NAME,
29529 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29530 .port_ops = &cs5520_port_ops,
29531 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29532 index 4dc4eb9..49b40ad 100644
29533 --- a/drivers/ide/cs5530.c
29534 +++ b/drivers/ide/cs5530.c
29535 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29536 .udma_filter = cs5530_udma_filter,
29537 };
29538
29539 -static const struct ide_port_info cs5530_chipset __devinitdata = {
29540 +static const struct ide_port_info cs5530_chipset __devinitconst = {
29541 .name = DRV_NAME,
29542 .init_chipset = init_chipset_cs5530,
29543 .init_hwif = init_hwif_cs5530,
29544 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29545 index 5059faf..18d4c85 100644
29546 --- a/drivers/ide/cs5535.c
29547 +++ b/drivers/ide/cs5535.c
29548 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29549 .cable_detect = cs5535_cable_detect,
29550 };
29551
29552 -static const struct ide_port_info cs5535_chipset __devinitdata = {
29553 +static const struct ide_port_info cs5535_chipset __devinitconst = {
29554 .name = DRV_NAME,
29555 .port_ops = &cs5535_port_ops,
29556 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29557 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29558 index 847553f..3ffb49d 100644
29559 --- a/drivers/ide/cy82c693.c
29560 +++ b/drivers/ide/cy82c693.c
29561 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29562 .set_dma_mode = cy82c693_set_dma_mode,
29563 };
29564
29565 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
29566 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
29567 .name = DRV_NAME,
29568 .init_iops = init_iops_cy82c693,
29569 .port_ops = &cy82c693_port_ops,
29570 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29571 index 58c51cd..4aec3b8 100644
29572 --- a/drivers/ide/hpt366.c
29573 +++ b/drivers/ide/hpt366.c
29574 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29575 }
29576 };
29577
29578 -static const struct hpt_info hpt36x __devinitdata = {
29579 +static const struct hpt_info hpt36x __devinitconst = {
29580 .chip_name = "HPT36x",
29581 .chip_type = HPT36x,
29582 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29583 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29584 .timings = &hpt36x_timings
29585 };
29586
29587 -static const struct hpt_info hpt370 __devinitdata = {
29588 +static const struct hpt_info hpt370 __devinitconst = {
29589 .chip_name = "HPT370",
29590 .chip_type = HPT370,
29591 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29592 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29593 .timings = &hpt37x_timings
29594 };
29595
29596 -static const struct hpt_info hpt370a __devinitdata = {
29597 +static const struct hpt_info hpt370a __devinitconst = {
29598 .chip_name = "HPT370A",
29599 .chip_type = HPT370A,
29600 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29601 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29602 .timings = &hpt37x_timings
29603 };
29604
29605 -static const struct hpt_info hpt374 __devinitdata = {
29606 +static const struct hpt_info hpt374 __devinitconst = {
29607 .chip_name = "HPT374",
29608 .chip_type = HPT374,
29609 .udma_mask = ATA_UDMA5,
29610 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29611 .timings = &hpt37x_timings
29612 };
29613
29614 -static const struct hpt_info hpt372 __devinitdata = {
29615 +static const struct hpt_info hpt372 __devinitconst = {
29616 .chip_name = "HPT372",
29617 .chip_type = HPT372,
29618 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29619 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29620 .timings = &hpt37x_timings
29621 };
29622
29623 -static const struct hpt_info hpt372a __devinitdata = {
29624 +static const struct hpt_info hpt372a __devinitconst = {
29625 .chip_name = "HPT372A",
29626 .chip_type = HPT372A,
29627 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29628 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29629 .timings = &hpt37x_timings
29630 };
29631
29632 -static const struct hpt_info hpt302 __devinitdata = {
29633 +static const struct hpt_info hpt302 __devinitconst = {
29634 .chip_name = "HPT302",
29635 .chip_type = HPT302,
29636 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29637 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29638 .timings = &hpt37x_timings
29639 };
29640
29641 -static const struct hpt_info hpt371 __devinitdata = {
29642 +static const struct hpt_info hpt371 __devinitconst = {
29643 .chip_name = "HPT371",
29644 .chip_type = HPT371,
29645 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29646 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29647 .timings = &hpt37x_timings
29648 };
29649
29650 -static const struct hpt_info hpt372n __devinitdata = {
29651 +static const struct hpt_info hpt372n __devinitconst = {
29652 .chip_name = "HPT372N",
29653 .chip_type = HPT372N,
29654 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29655 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29656 .timings = &hpt37x_timings
29657 };
29658
29659 -static const struct hpt_info hpt302n __devinitdata = {
29660 +static const struct hpt_info hpt302n __devinitconst = {
29661 .chip_name = "HPT302N",
29662 .chip_type = HPT302N,
29663 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29664 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29665 .timings = &hpt37x_timings
29666 };
29667
29668 -static const struct hpt_info hpt371n __devinitdata = {
29669 +static const struct hpt_info hpt371n __devinitconst = {
29670 .chip_name = "HPT371N",
29671 .chip_type = HPT371N,
29672 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29673 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29674 .dma_sff_read_status = ide_dma_sff_read_status,
29675 };
29676
29677 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29678 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29679 { /* 0: HPT36x */
29680 .name = DRV_NAME,
29681 .init_chipset = init_chipset_hpt366,
29682 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29683 index 8126824..55a2798 100644
29684 --- a/drivers/ide/ide-cd.c
29685 +++ b/drivers/ide/ide-cd.c
29686 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29687 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29688 if ((unsigned long)buf & alignment
29689 || blk_rq_bytes(rq) & q->dma_pad_mask
29690 - || object_is_on_stack(buf))
29691 + || object_starts_on_stack(buf))
29692 drive->dma = 0;
29693 }
29694 }
29695 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29696 index a743e68..1cfd674 100644
29697 --- a/drivers/ide/ide-pci-generic.c
29698 +++ b/drivers/ide/ide-pci-generic.c
29699 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29700 .udma_mask = ATA_UDMA6, \
29701 }
29702
29703 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
29704 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
29705 /* 0: Unknown */
29706 DECLARE_GENERIC_PCI_DEV(0),
29707
29708 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29709 index 560e66d..d5dd180 100644
29710 --- a/drivers/ide/it8172.c
29711 +++ b/drivers/ide/it8172.c
29712 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29713 .set_dma_mode = it8172_set_dma_mode,
29714 };
29715
29716 -static const struct ide_port_info it8172_port_info __devinitdata = {
29717 +static const struct ide_port_info it8172_port_info __devinitconst = {
29718 .name = DRV_NAME,
29719 .port_ops = &it8172_port_ops,
29720 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29721 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29722 index 46816ba..1847aeb 100644
29723 --- a/drivers/ide/it8213.c
29724 +++ b/drivers/ide/it8213.c
29725 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29726 .cable_detect = it8213_cable_detect,
29727 };
29728
29729 -static const struct ide_port_info it8213_chipset __devinitdata = {
29730 +static const struct ide_port_info it8213_chipset __devinitconst = {
29731 .name = DRV_NAME,
29732 .enablebits = { {0x41, 0x80, 0x80} },
29733 .port_ops = &it8213_port_ops,
29734 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29735 index 2e3169f..c5611db 100644
29736 --- a/drivers/ide/it821x.c
29737 +++ b/drivers/ide/it821x.c
29738 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29739 .cable_detect = it821x_cable_detect,
29740 };
29741
29742 -static const struct ide_port_info it821x_chipset __devinitdata = {
29743 +static const struct ide_port_info it821x_chipset __devinitconst = {
29744 .name = DRV_NAME,
29745 .init_chipset = init_chipset_it821x,
29746 .init_hwif = init_hwif_it821x,
29747 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29748 index 74c2c4a..efddd7d 100644
29749 --- a/drivers/ide/jmicron.c
29750 +++ b/drivers/ide/jmicron.c
29751 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29752 .cable_detect = jmicron_cable_detect,
29753 };
29754
29755 -static const struct ide_port_info jmicron_chipset __devinitdata = {
29756 +static const struct ide_port_info jmicron_chipset __devinitconst = {
29757 .name = DRV_NAME,
29758 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29759 .port_ops = &jmicron_port_ops,
29760 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29761 index 95327a2..73f78d8 100644
29762 --- a/drivers/ide/ns87415.c
29763 +++ b/drivers/ide/ns87415.c
29764 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29765 .dma_sff_read_status = superio_dma_sff_read_status,
29766 };
29767
29768 -static const struct ide_port_info ns87415_chipset __devinitdata = {
29769 +static const struct ide_port_info ns87415_chipset __devinitconst = {
29770 .name = DRV_NAME,
29771 .init_hwif = init_hwif_ns87415,
29772 .tp_ops = &ns87415_tp_ops,
29773 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29774 index 1a53a4c..39edc66 100644
29775 --- a/drivers/ide/opti621.c
29776 +++ b/drivers/ide/opti621.c
29777 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29778 .set_pio_mode = opti621_set_pio_mode,
29779 };
29780
29781 -static const struct ide_port_info opti621_chipset __devinitdata = {
29782 +static const struct ide_port_info opti621_chipset __devinitconst = {
29783 .name = DRV_NAME,
29784 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29785 .port_ops = &opti621_port_ops,
29786 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29787 index 9546fe2..2e5ceb6 100644
29788 --- a/drivers/ide/pdc202xx_new.c
29789 +++ b/drivers/ide/pdc202xx_new.c
29790 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29791 .udma_mask = udma, \
29792 }
29793
29794 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29795 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29796 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29797 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29798 };
29799 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29800 index 3a35ec6..5634510 100644
29801 --- a/drivers/ide/pdc202xx_old.c
29802 +++ b/drivers/ide/pdc202xx_old.c
29803 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29804 .max_sectors = sectors, \
29805 }
29806
29807 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29808 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29809 { /* 0: PDC20246 */
29810 .name = DRV_NAME,
29811 .init_chipset = init_chipset_pdc202xx,
29812 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29813 index 1892e81..fe0fd60 100644
29814 --- a/drivers/ide/piix.c
29815 +++ b/drivers/ide/piix.c
29816 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29817 .udma_mask = udma, \
29818 }
29819
29820 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
29821 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
29822 /* 0: MPIIX */
29823 { /*
29824 * MPIIX actually has only a single IDE channel mapped to
29825 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29826 index a6414a8..c04173e 100644
29827 --- a/drivers/ide/rz1000.c
29828 +++ b/drivers/ide/rz1000.c
29829 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29830 }
29831 }
29832
29833 -static const struct ide_port_info rz1000_chipset __devinitdata = {
29834 +static const struct ide_port_info rz1000_chipset __devinitconst = {
29835 .name = DRV_NAME,
29836 .host_flags = IDE_HFLAG_NO_DMA,
29837 };
29838 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29839 index 356b9b5..d4758eb 100644
29840 --- a/drivers/ide/sc1200.c
29841 +++ b/drivers/ide/sc1200.c
29842 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29843 .dma_sff_read_status = ide_dma_sff_read_status,
29844 };
29845
29846 -static const struct ide_port_info sc1200_chipset __devinitdata = {
29847 +static const struct ide_port_info sc1200_chipset __devinitconst = {
29848 .name = DRV_NAME,
29849 .port_ops = &sc1200_port_ops,
29850 .dma_ops = &sc1200_dma_ops,
29851 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29852 index b7f5b0c..9701038 100644
29853 --- a/drivers/ide/scc_pata.c
29854 +++ b/drivers/ide/scc_pata.c
29855 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29856 .dma_sff_read_status = scc_dma_sff_read_status,
29857 };
29858
29859 -static const struct ide_port_info scc_chipset __devinitdata = {
29860 +static const struct ide_port_info scc_chipset __devinitconst = {
29861 .name = "sccIDE",
29862 .init_iops = init_iops_scc,
29863 .init_dma = scc_init_dma,
29864 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29865 index 35fb8da..24d72ef 100644
29866 --- a/drivers/ide/serverworks.c
29867 +++ b/drivers/ide/serverworks.c
29868 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29869 .cable_detect = svwks_cable_detect,
29870 };
29871
29872 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29873 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29874 { /* 0: OSB4 */
29875 .name = DRV_NAME,
29876 .init_chipset = init_chipset_svwks,
29877 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29878 index ddeda44..46f7e30 100644
29879 --- a/drivers/ide/siimage.c
29880 +++ b/drivers/ide/siimage.c
29881 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29882 .udma_mask = ATA_UDMA6, \
29883 }
29884
29885 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29886 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29887 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29888 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29889 };
29890 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29891 index 4a00225..09e61b4 100644
29892 --- a/drivers/ide/sis5513.c
29893 +++ b/drivers/ide/sis5513.c
29894 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29895 .cable_detect = sis_cable_detect,
29896 };
29897
29898 -static const struct ide_port_info sis5513_chipset __devinitdata = {
29899 +static const struct ide_port_info sis5513_chipset __devinitconst = {
29900 .name = DRV_NAME,
29901 .init_chipset = init_chipset_sis5513,
29902 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29903 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29904 index f21dc2a..d051cd2 100644
29905 --- a/drivers/ide/sl82c105.c
29906 +++ b/drivers/ide/sl82c105.c
29907 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29908 .dma_sff_read_status = ide_dma_sff_read_status,
29909 };
29910
29911 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
29912 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
29913 .name = DRV_NAME,
29914 .init_chipset = init_chipset_sl82c105,
29915 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29916 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29917 index 864ffe0..863a5e9 100644
29918 --- a/drivers/ide/slc90e66.c
29919 +++ b/drivers/ide/slc90e66.c
29920 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29921 .cable_detect = slc90e66_cable_detect,
29922 };
29923
29924 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
29925 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
29926 .name = DRV_NAME,
29927 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29928 .port_ops = &slc90e66_port_ops,
29929 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29930 index 4799d5c..1794678 100644
29931 --- a/drivers/ide/tc86c001.c
29932 +++ b/drivers/ide/tc86c001.c
29933 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29934 .dma_sff_read_status = ide_dma_sff_read_status,
29935 };
29936
29937 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
29938 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
29939 .name = DRV_NAME,
29940 .init_hwif = init_hwif_tc86c001,
29941 .port_ops = &tc86c001_port_ops,
29942 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29943 index 281c914..55ce1b8 100644
29944 --- a/drivers/ide/triflex.c
29945 +++ b/drivers/ide/triflex.c
29946 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29947 .set_dma_mode = triflex_set_mode,
29948 };
29949
29950 -static const struct ide_port_info triflex_device __devinitdata = {
29951 +static const struct ide_port_info triflex_device __devinitconst = {
29952 .name = DRV_NAME,
29953 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29954 .port_ops = &triflex_port_ops,
29955 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29956 index 4b42ca0..e494a98 100644
29957 --- a/drivers/ide/trm290.c
29958 +++ b/drivers/ide/trm290.c
29959 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29960 .dma_check = trm290_dma_check,
29961 };
29962
29963 -static const struct ide_port_info trm290_chipset __devinitdata = {
29964 +static const struct ide_port_info trm290_chipset __devinitconst = {
29965 .name = DRV_NAME,
29966 .init_hwif = init_hwif_trm290,
29967 .tp_ops = &trm290_tp_ops,
29968 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29969 index f46f49c..eb77678 100644
29970 --- a/drivers/ide/via82cxxx.c
29971 +++ b/drivers/ide/via82cxxx.c
29972 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29973 .cable_detect = via82cxxx_cable_detect,
29974 };
29975
29976 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29977 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29978 .name = DRV_NAME,
29979 .init_chipset = init_chipset_via82cxxx,
29980 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29981 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
29982 index eb0e2cc..14241c7 100644
29983 --- a/drivers/ieee802154/fakehard.c
29984 +++ b/drivers/ieee802154/fakehard.c
29985 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
29986 phy->transmit_power = 0xbf;
29987
29988 dev->netdev_ops = &fake_ops;
29989 - dev->ml_priv = &fake_mlme;
29990 + dev->ml_priv = (void *)&fake_mlme;
29991
29992 priv = netdev_priv(dev);
29993 priv->phy = phy;
29994 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29995 index 8b72f39..55df4c8 100644
29996 --- a/drivers/infiniband/core/cm.c
29997 +++ b/drivers/infiniband/core/cm.c
29998 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29999
30000 struct cm_counter_group {
30001 struct kobject obj;
30002 - atomic_long_t counter[CM_ATTR_COUNT];
30003 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
30004 };
30005
30006 struct cm_counter_attribute {
30007 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
30008 struct ib_mad_send_buf *msg = NULL;
30009 int ret;
30010
30011 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30012 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30013 counter[CM_REQ_COUNTER]);
30014
30015 /* Quick state check to discard duplicate REQs. */
30016 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
30017 if (!cm_id_priv)
30018 return;
30019
30020 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30021 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30022 counter[CM_REP_COUNTER]);
30023 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30024 if (ret)
30025 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
30026 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30027 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30028 spin_unlock_irq(&cm_id_priv->lock);
30029 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30030 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30031 counter[CM_RTU_COUNTER]);
30032 goto out;
30033 }
30034 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
30035 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30036 dreq_msg->local_comm_id);
30037 if (!cm_id_priv) {
30038 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30039 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30040 counter[CM_DREQ_COUNTER]);
30041 cm_issue_drep(work->port, work->mad_recv_wc);
30042 return -EINVAL;
30043 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
30044 case IB_CM_MRA_REP_RCVD:
30045 break;
30046 case IB_CM_TIMEWAIT:
30047 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30048 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30049 counter[CM_DREQ_COUNTER]);
30050 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30051 goto unlock;
30052 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
30053 cm_free_msg(msg);
30054 goto deref;
30055 case IB_CM_DREQ_RCVD:
30056 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30057 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30058 counter[CM_DREQ_COUNTER]);
30059 goto unlock;
30060 default:
30061 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
30062 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30063 cm_id_priv->msg, timeout)) {
30064 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30065 - atomic_long_inc(&work->port->
30066 + atomic_long_inc_unchecked(&work->port->
30067 counter_group[CM_RECV_DUPLICATES].
30068 counter[CM_MRA_COUNTER]);
30069 goto out;
30070 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
30071 break;
30072 case IB_CM_MRA_REQ_RCVD:
30073 case IB_CM_MRA_REP_RCVD:
30074 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30075 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30076 counter[CM_MRA_COUNTER]);
30077 /* fall through */
30078 default:
30079 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
30080 case IB_CM_LAP_IDLE:
30081 break;
30082 case IB_CM_MRA_LAP_SENT:
30083 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30084 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30085 counter[CM_LAP_COUNTER]);
30086 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30087 goto unlock;
30088 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
30089 cm_free_msg(msg);
30090 goto deref;
30091 case IB_CM_LAP_RCVD:
30092 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30093 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30094 counter[CM_LAP_COUNTER]);
30095 goto unlock;
30096 default:
30097 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
30098 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30099 if (cur_cm_id_priv) {
30100 spin_unlock_irq(&cm.lock);
30101 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30102 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30103 counter[CM_SIDR_REQ_COUNTER]);
30104 goto out; /* Duplicate message. */
30105 }
30106 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
30107 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30108 msg->retries = 1;
30109
30110 - atomic_long_add(1 + msg->retries,
30111 + atomic_long_add_unchecked(1 + msg->retries,
30112 &port->counter_group[CM_XMIT].counter[attr_index]);
30113 if (msg->retries)
30114 - atomic_long_add(msg->retries,
30115 + atomic_long_add_unchecked(msg->retries,
30116 &port->counter_group[CM_XMIT_RETRIES].
30117 counter[attr_index]);
30118
30119 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
30120 }
30121
30122 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30123 - atomic_long_inc(&port->counter_group[CM_RECV].
30124 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30125 counter[attr_id - CM_ATTR_ID_OFFSET]);
30126
30127 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30128 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
30129 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30130
30131 return sprintf(buf, "%ld\n",
30132 - atomic_long_read(&group->counter[cm_attr->index]));
30133 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30134 }
30135
30136 static const struct sysfs_ops cm_counter_ops = {
30137 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
30138 index 176c8f9..2627b62 100644
30139 --- a/drivers/infiniband/core/fmr_pool.c
30140 +++ b/drivers/infiniband/core/fmr_pool.c
30141 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
30142
30143 struct task_struct *thread;
30144
30145 - atomic_t req_ser;
30146 - atomic_t flush_ser;
30147 + atomic_unchecked_t req_ser;
30148 + atomic_unchecked_t flush_ser;
30149
30150 wait_queue_head_t force_wait;
30151 };
30152 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
30153 struct ib_fmr_pool *pool = pool_ptr;
30154
30155 do {
30156 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30157 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30158 ib_fmr_batch_release(pool);
30159
30160 - atomic_inc(&pool->flush_ser);
30161 + atomic_inc_unchecked(&pool->flush_ser);
30162 wake_up_interruptible(&pool->force_wait);
30163
30164 if (pool->flush_function)
30165 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
30166 }
30167
30168 set_current_state(TASK_INTERRUPTIBLE);
30169 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30170 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30171 !kthread_should_stop())
30172 schedule();
30173 __set_current_state(TASK_RUNNING);
30174 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
30175 pool->dirty_watermark = params->dirty_watermark;
30176 pool->dirty_len = 0;
30177 spin_lock_init(&pool->pool_lock);
30178 - atomic_set(&pool->req_ser, 0);
30179 - atomic_set(&pool->flush_ser, 0);
30180 + atomic_set_unchecked(&pool->req_ser, 0);
30181 + atomic_set_unchecked(&pool->flush_ser, 0);
30182 init_waitqueue_head(&pool->force_wait);
30183
30184 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30185 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
30186 }
30187 spin_unlock_irq(&pool->pool_lock);
30188
30189 - serial = atomic_inc_return(&pool->req_ser);
30190 + serial = atomic_inc_return_unchecked(&pool->req_ser);
30191 wake_up_process(pool->thread);
30192
30193 if (wait_event_interruptible(pool->force_wait,
30194 - atomic_read(&pool->flush_ser) - serial >= 0))
30195 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30196 return -EINTR;
30197
30198 return 0;
30199 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
30200 } else {
30201 list_add_tail(&fmr->list, &pool->dirty_list);
30202 if (++pool->dirty_len >= pool->dirty_watermark) {
30203 - atomic_inc(&pool->req_ser);
30204 + atomic_inc_unchecked(&pool->req_ser);
30205 wake_up_process(pool->thread);
30206 }
30207 }
30208 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
30209 index 40c8353..946b0e4 100644
30210 --- a/drivers/infiniband/hw/cxgb4/mem.c
30211 +++ b/drivers/infiniband/hw/cxgb4/mem.c
30212 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30213 int err;
30214 struct fw_ri_tpte tpt;
30215 u32 stag_idx;
30216 - static atomic_t key;
30217 + static atomic_unchecked_t key;
30218
30219 if (c4iw_fatal_error(rdev))
30220 return -EIO;
30221 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30222 &rdev->resource.tpt_fifo_lock);
30223 if (!stag_idx)
30224 return -ENOMEM;
30225 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
30226 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
30227 }
30228 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
30229 __func__, stag_state, type, pdid, stag_idx);
30230 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
30231 index 79b3dbc..96e5fcc 100644
30232 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
30233 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
30234 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30235 struct ib_atomic_eth *ateth;
30236 struct ipath_ack_entry *e;
30237 u64 vaddr;
30238 - atomic64_t *maddr;
30239 + atomic64_unchecked_t *maddr;
30240 u64 sdata;
30241 u32 rkey;
30242 u8 next;
30243 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30244 IB_ACCESS_REMOTE_ATOMIC)))
30245 goto nack_acc_unlck;
30246 /* Perform atomic OP and save result. */
30247 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30248 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30249 sdata = be64_to_cpu(ateth->swap_data);
30250 e = &qp->s_ack_queue[qp->r_head_ack_queue];
30251 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
30252 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30253 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30254 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30255 be64_to_cpu(ateth->compare_data),
30256 sdata);
30257 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
30258 index 1f95bba..9530f87 100644
30259 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
30260 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
30261 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
30262 unsigned long flags;
30263 struct ib_wc wc;
30264 u64 sdata;
30265 - atomic64_t *maddr;
30266 + atomic64_unchecked_t *maddr;
30267 enum ib_wc_status send_status;
30268
30269 /*
30270 @@ -382,11 +382,11 @@ again:
30271 IB_ACCESS_REMOTE_ATOMIC)))
30272 goto acc_err;
30273 /* Perform atomic OP and save result. */
30274 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30275 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30276 sdata = wqe->wr.wr.atomic.compare_add;
30277 *(u64 *) sqp->s_sge.sge.vaddr =
30278 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
30279 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30280 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30281 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30282 sdata, wqe->wr.wr.atomic.swap);
30283 goto send_comp;
30284 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
30285 index 5965b3d..16817fb 100644
30286 --- a/drivers/infiniband/hw/nes/nes.c
30287 +++ b/drivers/infiniband/hw/nes/nes.c
30288 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
30289 LIST_HEAD(nes_adapter_list);
30290 static LIST_HEAD(nes_dev_list);
30291
30292 -atomic_t qps_destroyed;
30293 +atomic_unchecked_t qps_destroyed;
30294
30295 static unsigned int ee_flsh_adapter;
30296 static unsigned int sysfs_nonidx_addr;
30297 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
30298 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30299 struct nes_adapter *nesadapter = nesdev->nesadapter;
30300
30301 - atomic_inc(&qps_destroyed);
30302 + atomic_inc_unchecked(&qps_destroyed);
30303
30304 /* Free the control structures */
30305
30306 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30307 index 568b4f1..5ea3eff 100644
30308 --- a/drivers/infiniband/hw/nes/nes.h
30309 +++ b/drivers/infiniband/hw/nes/nes.h
30310 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
30311 extern unsigned int wqm_quanta;
30312 extern struct list_head nes_adapter_list;
30313
30314 -extern atomic_t cm_connects;
30315 -extern atomic_t cm_accepts;
30316 -extern atomic_t cm_disconnects;
30317 -extern atomic_t cm_closes;
30318 -extern atomic_t cm_connecteds;
30319 -extern atomic_t cm_connect_reqs;
30320 -extern atomic_t cm_rejects;
30321 -extern atomic_t mod_qp_timouts;
30322 -extern atomic_t qps_created;
30323 -extern atomic_t qps_destroyed;
30324 -extern atomic_t sw_qps_destroyed;
30325 +extern atomic_unchecked_t cm_connects;
30326 +extern atomic_unchecked_t cm_accepts;
30327 +extern atomic_unchecked_t cm_disconnects;
30328 +extern atomic_unchecked_t cm_closes;
30329 +extern atomic_unchecked_t cm_connecteds;
30330 +extern atomic_unchecked_t cm_connect_reqs;
30331 +extern atomic_unchecked_t cm_rejects;
30332 +extern atomic_unchecked_t mod_qp_timouts;
30333 +extern atomic_unchecked_t qps_created;
30334 +extern atomic_unchecked_t qps_destroyed;
30335 +extern atomic_unchecked_t sw_qps_destroyed;
30336 extern u32 mh_detected;
30337 extern u32 mh_pauses_sent;
30338 extern u32 cm_packets_sent;
30339 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
30340 extern u32 cm_packets_received;
30341 extern u32 cm_packets_dropped;
30342 extern u32 cm_packets_retrans;
30343 -extern atomic_t cm_listens_created;
30344 -extern atomic_t cm_listens_destroyed;
30345 +extern atomic_unchecked_t cm_listens_created;
30346 +extern atomic_unchecked_t cm_listens_destroyed;
30347 extern u32 cm_backlog_drops;
30348 -extern atomic_t cm_loopbacks;
30349 -extern atomic_t cm_nodes_created;
30350 -extern atomic_t cm_nodes_destroyed;
30351 -extern atomic_t cm_accel_dropped_pkts;
30352 -extern atomic_t cm_resets_recvd;
30353 -extern atomic_t pau_qps_created;
30354 -extern atomic_t pau_qps_destroyed;
30355 +extern atomic_unchecked_t cm_loopbacks;
30356 +extern atomic_unchecked_t cm_nodes_created;
30357 +extern atomic_unchecked_t cm_nodes_destroyed;
30358 +extern atomic_unchecked_t cm_accel_dropped_pkts;
30359 +extern atomic_unchecked_t cm_resets_recvd;
30360 +extern atomic_unchecked_t pau_qps_created;
30361 +extern atomic_unchecked_t pau_qps_destroyed;
30362
30363 extern u32 int_mod_timer_init;
30364 extern u32 int_mod_cq_depth_256;
30365 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30366 index 0a52d72..0642f36 100644
30367 --- a/drivers/infiniband/hw/nes/nes_cm.c
30368 +++ b/drivers/infiniband/hw/nes/nes_cm.c
30369 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30370 u32 cm_packets_retrans;
30371 u32 cm_packets_created;
30372 u32 cm_packets_received;
30373 -atomic_t cm_listens_created;
30374 -atomic_t cm_listens_destroyed;
30375 +atomic_unchecked_t cm_listens_created;
30376 +atomic_unchecked_t cm_listens_destroyed;
30377 u32 cm_backlog_drops;
30378 -atomic_t cm_loopbacks;
30379 -atomic_t cm_nodes_created;
30380 -atomic_t cm_nodes_destroyed;
30381 -atomic_t cm_accel_dropped_pkts;
30382 -atomic_t cm_resets_recvd;
30383 +atomic_unchecked_t cm_loopbacks;
30384 +atomic_unchecked_t cm_nodes_created;
30385 +atomic_unchecked_t cm_nodes_destroyed;
30386 +atomic_unchecked_t cm_accel_dropped_pkts;
30387 +atomic_unchecked_t cm_resets_recvd;
30388
30389 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
30390 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
30391 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
30392
30393 static struct nes_cm_core *g_cm_core;
30394
30395 -atomic_t cm_connects;
30396 -atomic_t cm_accepts;
30397 -atomic_t cm_disconnects;
30398 -atomic_t cm_closes;
30399 -atomic_t cm_connecteds;
30400 -atomic_t cm_connect_reqs;
30401 -atomic_t cm_rejects;
30402 +atomic_unchecked_t cm_connects;
30403 +atomic_unchecked_t cm_accepts;
30404 +atomic_unchecked_t cm_disconnects;
30405 +atomic_unchecked_t cm_closes;
30406 +atomic_unchecked_t cm_connecteds;
30407 +atomic_unchecked_t cm_connect_reqs;
30408 +atomic_unchecked_t cm_rejects;
30409
30410 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
30411 {
30412 @@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30413 kfree(listener);
30414 listener = NULL;
30415 ret = 0;
30416 - atomic_inc(&cm_listens_destroyed);
30417 + atomic_inc_unchecked(&cm_listens_destroyed);
30418 } else {
30419 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30420 }
30421 @@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30422 cm_node->rem_mac);
30423
30424 add_hte_node(cm_core, cm_node);
30425 - atomic_inc(&cm_nodes_created);
30426 + atomic_inc_unchecked(&cm_nodes_created);
30427
30428 return cm_node;
30429 }
30430 @@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30431 }
30432
30433 atomic_dec(&cm_core->node_cnt);
30434 - atomic_inc(&cm_nodes_destroyed);
30435 + atomic_inc_unchecked(&cm_nodes_destroyed);
30436 nesqp = cm_node->nesqp;
30437 if (nesqp) {
30438 nesqp->cm_node = NULL;
30439 @@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30440
30441 static void drop_packet(struct sk_buff *skb)
30442 {
30443 - atomic_inc(&cm_accel_dropped_pkts);
30444 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30445 dev_kfree_skb_any(skb);
30446 }
30447
30448 @@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30449 {
30450
30451 int reset = 0; /* whether to send reset in case of err.. */
30452 - atomic_inc(&cm_resets_recvd);
30453 + atomic_inc_unchecked(&cm_resets_recvd);
30454 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30455 " refcnt=%d\n", cm_node, cm_node->state,
30456 atomic_read(&cm_node->ref_count));
30457 @@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30458 rem_ref_cm_node(cm_node->cm_core, cm_node);
30459 return NULL;
30460 }
30461 - atomic_inc(&cm_loopbacks);
30462 + atomic_inc_unchecked(&cm_loopbacks);
30463 loopbackremotenode->loopbackpartner = cm_node;
30464 loopbackremotenode->tcp_cntxt.rcv_wscale =
30465 NES_CM_DEFAULT_RCV_WND_SCALE;
30466 @@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30467 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
30468 else {
30469 rem_ref_cm_node(cm_core, cm_node);
30470 - atomic_inc(&cm_accel_dropped_pkts);
30471 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30472 dev_kfree_skb_any(skb);
30473 }
30474 break;
30475 @@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30476
30477 if ((cm_id) && (cm_id->event_handler)) {
30478 if (issue_disconn) {
30479 - atomic_inc(&cm_disconnects);
30480 + atomic_inc_unchecked(&cm_disconnects);
30481 cm_event.event = IW_CM_EVENT_DISCONNECT;
30482 cm_event.status = disconn_status;
30483 cm_event.local_addr = cm_id->local_addr;
30484 @@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30485 }
30486
30487 if (issue_close) {
30488 - atomic_inc(&cm_closes);
30489 + atomic_inc_unchecked(&cm_closes);
30490 nes_disconnect(nesqp, 1);
30491
30492 cm_id->provider_data = nesqp;
30493 @@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30494
30495 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30496 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30497 - atomic_inc(&cm_accepts);
30498 + atomic_inc_unchecked(&cm_accepts);
30499
30500 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30501 netdev_refcnt_read(nesvnic->netdev));
30502 @@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30503 struct nes_cm_core *cm_core;
30504 u8 *start_buff;
30505
30506 - atomic_inc(&cm_rejects);
30507 + atomic_inc_unchecked(&cm_rejects);
30508 cm_node = (struct nes_cm_node *)cm_id->provider_data;
30509 loopback = cm_node->loopbackpartner;
30510 cm_core = cm_node->cm_core;
30511 @@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30512 ntohl(cm_id->local_addr.sin_addr.s_addr),
30513 ntohs(cm_id->local_addr.sin_port));
30514
30515 - atomic_inc(&cm_connects);
30516 + atomic_inc_unchecked(&cm_connects);
30517 nesqp->active_conn = 1;
30518
30519 /* cache the cm_id in the qp */
30520 @@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30521 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30522 return err;
30523 }
30524 - atomic_inc(&cm_listens_created);
30525 + atomic_inc_unchecked(&cm_listens_created);
30526 }
30527
30528 cm_id->add_ref(cm_id);
30529 @@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30530
30531 if (nesqp->destroyed)
30532 return;
30533 - atomic_inc(&cm_connecteds);
30534 + atomic_inc_unchecked(&cm_connecteds);
30535 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30536 " local port 0x%04X. jiffies = %lu.\n",
30537 nesqp->hwqp.qp_id,
30538 @@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30539
30540 cm_id->add_ref(cm_id);
30541 ret = cm_id->event_handler(cm_id, &cm_event);
30542 - atomic_inc(&cm_closes);
30543 + atomic_inc_unchecked(&cm_closes);
30544 cm_event.event = IW_CM_EVENT_CLOSE;
30545 cm_event.status = 0;
30546 cm_event.provider_data = cm_id->provider_data;
30547 @@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30548 return;
30549 cm_id = cm_node->cm_id;
30550
30551 - atomic_inc(&cm_connect_reqs);
30552 + atomic_inc_unchecked(&cm_connect_reqs);
30553 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30554 cm_node, cm_id, jiffies);
30555
30556 @@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30557 return;
30558 cm_id = cm_node->cm_id;
30559
30560 - atomic_inc(&cm_connect_reqs);
30561 + atomic_inc_unchecked(&cm_connect_reqs);
30562 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30563 cm_node, cm_id, jiffies);
30564
30565 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
30566 index b3b2a24..7bfaf1e 100644
30567 --- a/drivers/infiniband/hw/nes/nes_mgt.c
30568 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
30569 @@ -40,8 +40,8 @@
30570 #include "nes.h"
30571 #include "nes_mgt.h"
30572
30573 -atomic_t pau_qps_created;
30574 -atomic_t pau_qps_destroyed;
30575 +atomic_unchecked_t pau_qps_created;
30576 +atomic_unchecked_t pau_qps_destroyed;
30577
30578 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
30579 {
30580 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
30581 {
30582 struct sk_buff *skb;
30583 unsigned long flags;
30584 - atomic_inc(&pau_qps_destroyed);
30585 + atomic_inc_unchecked(&pau_qps_destroyed);
30586
30587 /* Free packets that have not yet been forwarded */
30588 /* Lock is acquired by skb_dequeue when removing the skb */
30589 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
30590 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
30591 skb_queue_head_init(&nesqp->pau_list);
30592 spin_lock_init(&nesqp->pau_lock);
30593 - atomic_inc(&pau_qps_created);
30594 + atomic_inc_unchecked(&pau_qps_created);
30595 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
30596 }
30597
30598 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30599 index c00d2f3..8834298 100644
30600 --- a/drivers/infiniband/hw/nes/nes_nic.c
30601 +++ b/drivers/infiniband/hw/nes/nes_nic.c
30602 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30603 target_stat_values[++index] = mh_detected;
30604 target_stat_values[++index] = mh_pauses_sent;
30605 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30606 - target_stat_values[++index] = atomic_read(&cm_connects);
30607 - target_stat_values[++index] = atomic_read(&cm_accepts);
30608 - target_stat_values[++index] = atomic_read(&cm_disconnects);
30609 - target_stat_values[++index] = atomic_read(&cm_connecteds);
30610 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30611 - target_stat_values[++index] = atomic_read(&cm_rejects);
30612 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30613 - target_stat_values[++index] = atomic_read(&qps_created);
30614 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30615 - target_stat_values[++index] = atomic_read(&qps_destroyed);
30616 - target_stat_values[++index] = atomic_read(&cm_closes);
30617 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30618 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30619 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30620 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30621 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30622 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30623 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30624 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30625 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30626 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30627 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30628 target_stat_values[++index] = cm_packets_sent;
30629 target_stat_values[++index] = cm_packets_bounced;
30630 target_stat_values[++index] = cm_packets_created;
30631 target_stat_values[++index] = cm_packets_received;
30632 target_stat_values[++index] = cm_packets_dropped;
30633 target_stat_values[++index] = cm_packets_retrans;
30634 - target_stat_values[++index] = atomic_read(&cm_listens_created);
30635 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30636 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30637 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30638 target_stat_values[++index] = cm_backlog_drops;
30639 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
30640 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
30641 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30642 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30643 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30644 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30645 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30646 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30647 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30648 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30649 target_stat_values[++index] = nesadapter->free_4kpbl;
30650 target_stat_values[++index] = nesadapter->free_256pbl;
30651 target_stat_values[++index] = int_mod_timer_init;
30652 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
30653 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
30654 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
30655 - target_stat_values[++index] = atomic_read(&pau_qps_created);
30656 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
30657 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
30658 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
30659 }
30660
30661 /**
30662 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30663 index 5095bc4..41e8fff 100644
30664 --- a/drivers/infiniband/hw/nes/nes_verbs.c
30665 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
30666 @@ -46,9 +46,9 @@
30667
30668 #include <rdma/ib_umem.h>
30669
30670 -atomic_t mod_qp_timouts;
30671 -atomic_t qps_created;
30672 -atomic_t sw_qps_destroyed;
30673 +atomic_unchecked_t mod_qp_timouts;
30674 +atomic_unchecked_t qps_created;
30675 +atomic_unchecked_t sw_qps_destroyed;
30676
30677 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30678
30679 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30680 if (init_attr->create_flags)
30681 return ERR_PTR(-EINVAL);
30682
30683 - atomic_inc(&qps_created);
30684 + atomic_inc_unchecked(&qps_created);
30685 switch (init_attr->qp_type) {
30686 case IB_QPT_RC:
30687 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30688 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30689 struct iw_cm_event cm_event;
30690 int ret = 0;
30691
30692 - atomic_inc(&sw_qps_destroyed);
30693 + atomic_inc_unchecked(&sw_qps_destroyed);
30694 nesqp->destroyed = 1;
30695
30696 /* Blow away the connection if it exists. */
30697 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30698 index b881bdc..c2e360c 100644
30699 --- a/drivers/infiniband/hw/qib/qib.h
30700 +++ b/drivers/infiniband/hw/qib/qib.h
30701 @@ -51,6 +51,7 @@
30702 #include <linux/completion.h>
30703 #include <linux/kref.h>
30704 #include <linux/sched.h>
30705 +#include <linux/slab.h>
30706
30707 #include "qib_common.h"
30708 #include "qib_verbs.h"
30709 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30710 index c351aa4..e6967c2 100644
30711 --- a/drivers/input/gameport/gameport.c
30712 +++ b/drivers/input/gameport/gameport.c
30713 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30714 */
30715 static void gameport_init_port(struct gameport *gameport)
30716 {
30717 - static atomic_t gameport_no = ATOMIC_INIT(0);
30718 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30719
30720 __module_get(THIS_MODULE);
30721
30722 mutex_init(&gameport->drv_mutex);
30723 device_initialize(&gameport->dev);
30724 dev_set_name(&gameport->dev, "gameport%lu",
30725 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
30726 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30727 gameport->dev.bus = &gameport_bus;
30728 gameport->dev.release = gameport_release_port;
30729 if (gameport->parent)
30730 diff --git a/drivers/input/input.c b/drivers/input/input.c
30731 index da38d97..2aa0b79 100644
30732 --- a/drivers/input/input.c
30733 +++ b/drivers/input/input.c
30734 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30735 */
30736 int input_register_device(struct input_dev *dev)
30737 {
30738 - static atomic_t input_no = ATOMIC_INIT(0);
30739 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30740 struct input_handler *handler;
30741 const char *path;
30742 int error;
30743 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30744 dev->setkeycode = input_default_setkeycode;
30745
30746 dev_set_name(&dev->dev, "input%ld",
30747 - (unsigned long) atomic_inc_return(&input_no) - 1);
30748 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30749
30750 error = device_add(&dev->dev);
30751 if (error)
30752 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30753 index b8d8611..7a4a04b 100644
30754 --- a/drivers/input/joystick/sidewinder.c
30755 +++ b/drivers/input/joystick/sidewinder.c
30756 @@ -30,6 +30,7 @@
30757 #include <linux/kernel.h>
30758 #include <linux/module.h>
30759 #include <linux/slab.h>
30760 +#include <linux/sched.h>
30761 #include <linux/init.h>
30762 #include <linux/input.h>
30763 #include <linux/gameport.h>
30764 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30765 index d728875..844c89b 100644
30766 --- a/drivers/input/joystick/xpad.c
30767 +++ b/drivers/input/joystick/xpad.c
30768 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30769
30770 static int xpad_led_probe(struct usb_xpad *xpad)
30771 {
30772 - static atomic_t led_seq = ATOMIC_INIT(0);
30773 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30774 long led_no;
30775 struct xpad_led *led;
30776 struct led_classdev *led_cdev;
30777 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30778 if (!led)
30779 return -ENOMEM;
30780
30781 - led_no = (long)atomic_inc_return(&led_seq) - 1;
30782 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30783
30784 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30785 led->xpad = xpad;
30786 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30787 index 0110b5a..d3ad144 100644
30788 --- a/drivers/input/mousedev.c
30789 +++ b/drivers/input/mousedev.c
30790 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30791
30792 spin_unlock_irq(&client->packet_lock);
30793
30794 - if (copy_to_user(buffer, data, count))
30795 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
30796 return -EFAULT;
30797
30798 return count;
30799 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30800 index ba70058..571d25d 100644
30801 --- a/drivers/input/serio/serio.c
30802 +++ b/drivers/input/serio/serio.c
30803 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30804 */
30805 static void serio_init_port(struct serio *serio)
30806 {
30807 - static atomic_t serio_no = ATOMIC_INIT(0);
30808 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30809
30810 __module_get(THIS_MODULE);
30811
30812 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30813 mutex_init(&serio->drv_mutex);
30814 device_initialize(&serio->dev);
30815 dev_set_name(&serio->dev, "serio%ld",
30816 - (long)atomic_inc_return(&serio_no) - 1);
30817 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
30818 serio->dev.bus = &serio_bus;
30819 serio->dev.release = serio_release_port;
30820 serio->dev.groups = serio_device_attr_groups;
30821 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30822 index e44933d..9ba484a 100644
30823 --- a/drivers/isdn/capi/capi.c
30824 +++ b/drivers/isdn/capi/capi.c
30825 @@ -83,8 +83,8 @@ struct capiminor {
30826
30827 struct capi20_appl *ap;
30828 u32 ncci;
30829 - atomic_t datahandle;
30830 - atomic_t msgid;
30831 + atomic_unchecked_t datahandle;
30832 + atomic_unchecked_t msgid;
30833
30834 struct tty_port port;
30835 int ttyinstop;
30836 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30837 capimsg_setu16(s, 2, mp->ap->applid);
30838 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30839 capimsg_setu8 (s, 5, CAPI_RESP);
30840 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30841 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30842 capimsg_setu32(s, 8, mp->ncci);
30843 capimsg_setu16(s, 12, datahandle);
30844 }
30845 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30846 mp->outbytes -= len;
30847 spin_unlock_bh(&mp->outlock);
30848
30849 - datahandle = atomic_inc_return(&mp->datahandle);
30850 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30851 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30852 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30853 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30854 capimsg_setu16(skb->data, 2, mp->ap->applid);
30855 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30856 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30857 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30858 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30859 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30860 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30861 capimsg_setu16(skb->data, 16, len); /* Data length */
30862 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30863 index db621db..825ea1a 100644
30864 --- a/drivers/isdn/gigaset/common.c
30865 +++ b/drivers/isdn/gigaset/common.c
30866 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30867 cs->commands_pending = 0;
30868 cs->cur_at_seq = 0;
30869 cs->gotfwver = -1;
30870 - cs->open_count = 0;
30871 + local_set(&cs->open_count, 0);
30872 cs->dev = NULL;
30873 cs->tty = NULL;
30874 cs->tty_dev = NULL;
30875 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30876 index 212efaf..f187c6b 100644
30877 --- a/drivers/isdn/gigaset/gigaset.h
30878 +++ b/drivers/isdn/gigaset/gigaset.h
30879 @@ -35,6 +35,7 @@
30880 #include <linux/tty_driver.h>
30881 #include <linux/list.h>
30882 #include <linux/atomic.h>
30883 +#include <asm/local.h>
30884
30885 #define GIG_VERSION {0, 5, 0, 0}
30886 #define GIG_COMPAT {0, 4, 0, 0}
30887 @@ -433,7 +434,7 @@ struct cardstate {
30888 spinlock_t cmdlock;
30889 unsigned curlen, cmdbytes;
30890
30891 - unsigned open_count;
30892 + local_t open_count;
30893 struct tty_struct *tty;
30894 struct tasklet_struct if_wake_tasklet;
30895 unsigned control_state;
30896 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30897 index ee0a549..a7c9798 100644
30898 --- a/drivers/isdn/gigaset/interface.c
30899 +++ b/drivers/isdn/gigaset/interface.c
30900 @@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30901 }
30902 tty->driver_data = cs;
30903
30904 - ++cs->open_count;
30905 -
30906 - if (cs->open_count == 1) {
30907 + if (local_inc_return(&cs->open_count) == 1) {
30908 spin_lock_irqsave(&cs->lock, flags);
30909 cs->tty = tty;
30910 spin_unlock_irqrestore(&cs->lock, flags);
30911 @@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30912
30913 if (!cs->connected)
30914 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30915 - else if (!cs->open_count)
30916 + else if (!local_read(&cs->open_count))
30917 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30918 else {
30919 - if (!--cs->open_count) {
30920 + if (!local_dec_return(&cs->open_count)) {
30921 spin_lock_irqsave(&cs->lock, flags);
30922 cs->tty = NULL;
30923 spin_unlock_irqrestore(&cs->lock, flags);
30924 @@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
30925 if (!cs->connected) {
30926 gig_dbg(DEBUG_IF, "not connected");
30927 retval = -ENODEV;
30928 - } else if (!cs->open_count)
30929 + } else if (!local_read(&cs->open_count))
30930 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30931 else {
30932 retval = 0;
30933 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30934 retval = -ENODEV;
30935 goto done;
30936 }
30937 - if (!cs->open_count) {
30938 + if (!local_read(&cs->open_count)) {
30939 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30940 retval = -ENODEV;
30941 goto done;
30942 @@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
30943 if (!cs->connected) {
30944 gig_dbg(DEBUG_IF, "not connected");
30945 retval = -ENODEV;
30946 - } else if (!cs->open_count)
30947 + } else if (!local_read(&cs->open_count))
30948 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30949 else if (cs->mstate != MS_LOCKED) {
30950 dev_warn(cs->dev, "can't write to unlocked device\n");
30951 @@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30952
30953 if (!cs->connected)
30954 gig_dbg(DEBUG_IF, "not connected");
30955 - else if (!cs->open_count)
30956 + else if (!local_read(&cs->open_count))
30957 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30958 else if (cs->mstate != MS_LOCKED)
30959 dev_warn(cs->dev, "can't write to unlocked device\n");
30960 @@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
30961
30962 if (!cs->connected)
30963 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30964 - else if (!cs->open_count)
30965 + else if (!local_read(&cs->open_count))
30966 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30967 else
30968 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30969 @@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
30970
30971 if (!cs->connected)
30972 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30973 - else if (!cs->open_count)
30974 + else if (!local_read(&cs->open_count))
30975 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30976 else
30977 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30978 @@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30979 goto out;
30980 }
30981
30982 - if (!cs->open_count) {
30983 + if (!local_read(&cs->open_count)) {
30984 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30985 goto out;
30986 }
30987 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30988 index 2a57da59..e7a12ed 100644
30989 --- a/drivers/isdn/hardware/avm/b1.c
30990 +++ b/drivers/isdn/hardware/avm/b1.c
30991 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30992 }
30993 if (left) {
30994 if (t4file->user) {
30995 - if (copy_from_user(buf, dp, left))
30996 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30997 return -EFAULT;
30998 } else {
30999 memcpy(buf, dp, left);
31000 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
31001 }
31002 if (left) {
31003 if (config->user) {
31004 - if (copy_from_user(buf, dp, left))
31005 + if (left > sizeof buf || copy_from_user(buf, dp, left))
31006 return -EFAULT;
31007 } else {
31008 memcpy(buf, dp, left);
31009 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
31010 index 85784a7..a19ca98 100644
31011 --- a/drivers/isdn/hardware/eicon/divasync.h
31012 +++ b/drivers/isdn/hardware/eicon/divasync.h
31013 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31014 } diva_didd_add_adapter_t;
31015 typedef struct _diva_didd_remove_adapter {
31016 IDI_CALL p_request;
31017 -} diva_didd_remove_adapter_t;
31018 +} __no_const diva_didd_remove_adapter_t;
31019 typedef struct _diva_didd_read_adapter_array {
31020 void * buffer;
31021 dword length;
31022 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
31023 index a3bd163..8956575 100644
31024 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
31025 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
31026 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31027 typedef struct _diva_os_idi_adapter_interface {
31028 diva_init_card_proc_t cleanup_adapter_proc;
31029 diva_cmd_card_proc_t cmd_proc;
31030 -} diva_os_idi_adapter_interface_t;
31031 +} __no_const diva_os_idi_adapter_interface_t;
31032
31033 typedef struct _diva_os_xdi_adapter {
31034 struct list_head link;
31035 diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
31036 index 2339d73..802ab87 100644
31037 --- a/drivers/isdn/i4l/isdn_net.c
31038 +++ b/drivers/isdn/i4l/isdn_net.c
31039 @@ -1901,7 +1901,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
31040 {
31041 isdn_net_local *lp = netdev_priv(dev);
31042 unsigned char *p;
31043 - ushort len = 0;
31044 + int len = 0;
31045
31046 switch (lp->p_encap) {
31047 case ISDN_NET_ENCAP_ETHER:
31048 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
31049 index 1f355bb..43f1fea 100644
31050 --- a/drivers/isdn/icn/icn.c
31051 +++ b/drivers/isdn/icn/icn.c
31052 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
31053 if (count > len)
31054 count = len;
31055 if (user) {
31056 - if (copy_from_user(msg, buf, count))
31057 + if (count > sizeof msg || copy_from_user(msg, buf, count))
31058 return -EFAULT;
31059 } else
31060 memcpy(msg, buf, count);
31061 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
31062 index b5fdcb7..5b6c59f 100644
31063 --- a/drivers/lguest/core.c
31064 +++ b/drivers/lguest/core.c
31065 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
31066 * it's worked so far. The end address needs +1 because __get_vm_area
31067 * allocates an extra guard page, so we need space for that.
31068 */
31069 +
31070 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31071 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31072 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31073 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31074 +#else
31075 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31076 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31077 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31078 +#endif
31079 +
31080 if (!switcher_vma) {
31081 err = -ENOMEM;
31082 printk("lguest: could not map switcher pages high\n");
31083 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
31084 * Now the Switcher is mapped at the right address, we can't fail!
31085 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
31086 */
31087 - memcpy(switcher_vma->addr, start_switcher_text,
31088 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31089 end_switcher_text - start_switcher_text);
31090
31091 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31092 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
31093 index 65af42f..530c87a 100644
31094 --- a/drivers/lguest/x86/core.c
31095 +++ b/drivers/lguest/x86/core.c
31096 @@ -59,7 +59,7 @@ static struct {
31097 /* Offset from where switcher.S was compiled to where we've copied it */
31098 static unsigned long switcher_offset(void)
31099 {
31100 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31101 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31102 }
31103
31104 /* This cpu's struct lguest_pages. */
31105 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
31106 * These copies are pretty cheap, so we do them unconditionally: */
31107 /* Save the current Host top-level page directory.
31108 */
31109 +
31110 +#ifdef CONFIG_PAX_PER_CPU_PGD
31111 + pages->state.host_cr3 = read_cr3();
31112 +#else
31113 pages->state.host_cr3 = __pa(current->mm->pgd);
31114 +#endif
31115 +
31116 /*
31117 * Set up the Guest's page tables to see this CPU's pages (and no
31118 * other CPU's pages).
31119 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
31120 * compiled-in switcher code and the high-mapped copy we just made.
31121 */
31122 for (i = 0; i < IDT_ENTRIES; i++)
31123 - default_idt_entries[i] += switcher_offset();
31124 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31125
31126 /*
31127 * Set up the Switcher's per-cpu areas.
31128 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
31129 * it will be undisturbed when we switch. To change %cs and jump we
31130 * need this structure to feed to Intel's "lcall" instruction.
31131 */
31132 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31133 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31134 lguest_entry.segment = LGUEST_CS;
31135
31136 /*
31137 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
31138 index 40634b0..4f5855e 100644
31139 --- a/drivers/lguest/x86/switcher_32.S
31140 +++ b/drivers/lguest/x86/switcher_32.S
31141 @@ -87,6 +87,7 @@
31142 #include <asm/page.h>
31143 #include <asm/segment.h>
31144 #include <asm/lguest.h>
31145 +#include <asm/processor-flags.h>
31146
31147 // We mark the start of the code to copy
31148 // It's placed in .text tho it's never run here
31149 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31150 // Changes type when we load it: damn Intel!
31151 // For after we switch over our page tables
31152 // That entry will be read-only: we'd crash.
31153 +
31154 +#ifdef CONFIG_PAX_KERNEXEC
31155 + mov %cr0, %edx
31156 + xor $X86_CR0_WP, %edx
31157 + mov %edx, %cr0
31158 +#endif
31159 +
31160 movl $(GDT_ENTRY_TSS*8), %edx
31161 ltr %dx
31162
31163 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31164 // Let's clear it again for our return.
31165 // The GDT descriptor of the Host
31166 // Points to the table after two "size" bytes
31167 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31168 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31169 // Clear "used" from type field (byte 5, bit 2)
31170 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31171 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31172 +
31173 +#ifdef CONFIG_PAX_KERNEXEC
31174 + mov %cr0, %eax
31175 + xor $X86_CR0_WP, %eax
31176 + mov %eax, %cr0
31177 +#endif
31178
31179 // Once our page table's switched, the Guest is live!
31180 // The Host fades as we run this final step.
31181 @@ -295,13 +309,12 @@ deliver_to_host:
31182 // I consulted gcc, and it gave
31183 // These instructions, which I gladly credit:
31184 leal (%edx,%ebx,8), %eax
31185 - movzwl (%eax),%edx
31186 - movl 4(%eax), %eax
31187 - xorw %ax, %ax
31188 - orl %eax, %edx
31189 + movl 4(%eax), %edx
31190 + movw (%eax), %dx
31191 // Now the address of the handler's in %edx
31192 // We call it now: its "iret" drops us home.
31193 - jmp *%edx
31194 + ljmp $__KERNEL_CS, $1f
31195 +1: jmp *%edx
31196
31197 // Every interrupt can come to us here
31198 // But we must truly tell each apart.
31199 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
31200 index 4daf9e5..b8d1d0f 100644
31201 --- a/drivers/macintosh/macio_asic.c
31202 +++ b/drivers/macintosh/macio_asic.c
31203 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
31204 * MacIO is matched against any Apple ID, it's probe() function
31205 * will then decide wether it applies or not
31206 */
31207 -static const struct pci_device_id __devinitdata pci_ids [] = { {
31208 +static const struct pci_device_id __devinitconst pci_ids [] = { {
31209 .vendor = PCI_VENDOR_ID_APPLE,
31210 .device = PCI_ANY_ID,
31211 .subvendor = PCI_ANY_ID,
31212 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
31213 index 31c2dc2..a2de7a6 100644
31214 --- a/drivers/md/dm-ioctl.c
31215 +++ b/drivers/md/dm-ioctl.c
31216 @@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
31217 cmd == DM_LIST_VERSIONS_CMD)
31218 return 0;
31219
31220 - if ((cmd == DM_DEV_CREATE_CMD)) {
31221 + if (cmd == DM_DEV_CREATE_CMD) {
31222 if (!*param->name) {
31223 DMWARN("name not supplied when creating device");
31224 return -EINVAL;
31225 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
31226 index 9bfd057..01180bc 100644
31227 --- a/drivers/md/dm-raid1.c
31228 +++ b/drivers/md/dm-raid1.c
31229 @@ -40,7 +40,7 @@ enum dm_raid1_error {
31230
31231 struct mirror {
31232 struct mirror_set *ms;
31233 - atomic_t error_count;
31234 + atomic_unchecked_t error_count;
31235 unsigned long error_type;
31236 struct dm_dev *dev;
31237 sector_t offset;
31238 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
31239 struct mirror *m;
31240
31241 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
31242 - if (!atomic_read(&m->error_count))
31243 + if (!atomic_read_unchecked(&m->error_count))
31244 return m;
31245
31246 return NULL;
31247 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
31248 * simple way to tell if a device has encountered
31249 * errors.
31250 */
31251 - atomic_inc(&m->error_count);
31252 + atomic_inc_unchecked(&m->error_count);
31253
31254 if (test_and_set_bit(error_type, &m->error_type))
31255 return;
31256 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
31257 struct mirror *m = get_default_mirror(ms);
31258
31259 do {
31260 - if (likely(!atomic_read(&m->error_count)))
31261 + if (likely(!atomic_read_unchecked(&m->error_count)))
31262 return m;
31263
31264 if (m-- == ms->mirror)
31265 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
31266 {
31267 struct mirror *default_mirror = get_default_mirror(m->ms);
31268
31269 - return !atomic_read(&default_mirror->error_count);
31270 + return !atomic_read_unchecked(&default_mirror->error_count);
31271 }
31272
31273 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31274 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
31275 */
31276 if (likely(region_in_sync(ms, region, 1)))
31277 m = choose_mirror(ms, bio->bi_sector);
31278 - else if (m && atomic_read(&m->error_count))
31279 + else if (m && atomic_read_unchecked(&m->error_count))
31280 m = NULL;
31281
31282 if (likely(m))
31283 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
31284 }
31285
31286 ms->mirror[mirror].ms = ms;
31287 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31288 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31289 ms->mirror[mirror].error_type = 0;
31290 ms->mirror[mirror].offset = offset;
31291
31292 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
31293 */
31294 static char device_status_char(struct mirror *m)
31295 {
31296 - if (!atomic_read(&(m->error_count)))
31297 + if (!atomic_read_unchecked(&(m->error_count)))
31298 return 'A';
31299
31300 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
31301 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
31302 index 3d80cf0..b77cc47 100644
31303 --- a/drivers/md/dm-stripe.c
31304 +++ b/drivers/md/dm-stripe.c
31305 @@ -20,7 +20,7 @@ struct stripe {
31306 struct dm_dev *dev;
31307 sector_t physical_start;
31308
31309 - atomic_t error_count;
31310 + atomic_unchecked_t error_count;
31311 };
31312
31313 struct stripe_c {
31314 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31315 kfree(sc);
31316 return r;
31317 }
31318 - atomic_set(&(sc->stripe[i].error_count), 0);
31319 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31320 }
31321
31322 ti->private = sc;
31323 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31324 DMEMIT("%d ", sc->stripes);
31325 for (i = 0; i < sc->stripes; i++) {
31326 DMEMIT("%s ", sc->stripe[i].dev->name);
31327 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31328 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31329 'D' : 'A';
31330 }
31331 buffer[i] = '\0';
31332 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31333 */
31334 for (i = 0; i < sc->stripes; i++)
31335 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31336 - atomic_inc(&(sc->stripe[i].error_count));
31337 - if (atomic_read(&(sc->stripe[i].error_count)) <
31338 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31339 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31340 DM_IO_ERROR_THRESHOLD)
31341 schedule_work(&sc->trigger_event);
31342 }
31343 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31344 index 8e91321..fd17aef 100644
31345 --- a/drivers/md/dm-table.c
31346 +++ b/drivers/md/dm-table.c
31347 @@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31348 if (!dev_size)
31349 return 0;
31350
31351 - if ((start >= dev_size) || (start + len > dev_size)) {
31352 + if ((start >= dev_size) || (len > dev_size - start)) {
31353 DMWARN("%s: %s too small for target: "
31354 "start=%llu, len=%llu, dev_size=%llu",
31355 dm_device_name(ti->table->md), bdevname(bdev, b),
31356 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
31357 index 59c4f04..4c7b661 100644
31358 --- a/drivers/md/dm-thin-metadata.c
31359 +++ b/drivers/md/dm-thin-metadata.c
31360 @@ -431,7 +431,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31361
31362 pmd->info.tm = tm;
31363 pmd->info.levels = 2;
31364 - pmd->info.value_type.context = pmd->data_sm;
31365 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31366 pmd->info.value_type.size = sizeof(__le64);
31367 pmd->info.value_type.inc = data_block_inc;
31368 pmd->info.value_type.dec = data_block_dec;
31369 @@ -450,7 +450,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31370
31371 pmd->bl_info.tm = tm;
31372 pmd->bl_info.levels = 1;
31373 - pmd->bl_info.value_type.context = pmd->data_sm;
31374 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31375 pmd->bl_info.value_type.size = sizeof(__le64);
31376 pmd->bl_info.value_type.inc = data_block_inc;
31377 pmd->bl_info.value_type.dec = data_block_dec;
31378 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31379 index 4720f68..78d1df7 100644
31380 --- a/drivers/md/dm.c
31381 +++ b/drivers/md/dm.c
31382 @@ -177,9 +177,9 @@ struct mapped_device {
31383 /*
31384 * Event handling.
31385 */
31386 - atomic_t event_nr;
31387 + atomic_unchecked_t event_nr;
31388 wait_queue_head_t eventq;
31389 - atomic_t uevent_seq;
31390 + atomic_unchecked_t uevent_seq;
31391 struct list_head uevent_list;
31392 spinlock_t uevent_lock; /* Protect access to uevent_list */
31393
31394 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
31395 rwlock_init(&md->map_lock);
31396 atomic_set(&md->holders, 1);
31397 atomic_set(&md->open_count, 0);
31398 - atomic_set(&md->event_nr, 0);
31399 - atomic_set(&md->uevent_seq, 0);
31400 + atomic_set_unchecked(&md->event_nr, 0);
31401 + atomic_set_unchecked(&md->uevent_seq, 0);
31402 INIT_LIST_HEAD(&md->uevent_list);
31403 spin_lock_init(&md->uevent_lock);
31404
31405 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
31406
31407 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31408
31409 - atomic_inc(&md->event_nr);
31410 + atomic_inc_unchecked(&md->event_nr);
31411 wake_up(&md->eventq);
31412 }
31413
31414 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31415
31416 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31417 {
31418 - return atomic_add_return(1, &md->uevent_seq);
31419 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31420 }
31421
31422 uint32_t dm_get_event_nr(struct mapped_device *md)
31423 {
31424 - return atomic_read(&md->event_nr);
31425 + return atomic_read_unchecked(&md->event_nr);
31426 }
31427
31428 int dm_wait_event(struct mapped_device *md, int event_nr)
31429 {
31430 return wait_event_interruptible(md->eventq,
31431 - (event_nr != atomic_read(&md->event_nr)));
31432 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31433 }
31434
31435 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31436 diff --git a/drivers/md/md.c b/drivers/md/md.c
31437 index f47f1f8..b7f559e 100644
31438 --- a/drivers/md/md.c
31439 +++ b/drivers/md/md.c
31440 @@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31441 * start build, activate spare
31442 */
31443 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31444 -static atomic_t md_event_count;
31445 +static atomic_unchecked_t md_event_count;
31446 void md_new_event(struct mddev *mddev)
31447 {
31448 - atomic_inc(&md_event_count);
31449 + atomic_inc_unchecked(&md_event_count);
31450 wake_up(&md_event_waiters);
31451 }
31452 EXPORT_SYMBOL_GPL(md_new_event);
31453 @@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31454 */
31455 static void md_new_event_inintr(struct mddev *mddev)
31456 {
31457 - atomic_inc(&md_event_count);
31458 + atomic_inc_unchecked(&md_event_count);
31459 wake_up(&md_event_waiters);
31460 }
31461
31462 @@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
31463
31464 rdev->preferred_minor = 0xffff;
31465 rdev->data_offset = le64_to_cpu(sb->data_offset);
31466 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31467 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31468
31469 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31470 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31471 @@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
31472 else
31473 sb->resync_offset = cpu_to_le64(0);
31474
31475 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31476 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31477
31478 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31479 sb->size = cpu_to_le64(mddev->dev_sectors);
31480 @@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31481 static ssize_t
31482 errors_show(struct md_rdev *rdev, char *page)
31483 {
31484 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31485 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31486 }
31487
31488 static ssize_t
31489 @@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
31490 char *e;
31491 unsigned long n = simple_strtoul(buf, &e, 10);
31492 if (*buf && (*e == 0 || *e == '\n')) {
31493 - atomic_set(&rdev->corrected_errors, n);
31494 + atomic_set_unchecked(&rdev->corrected_errors, n);
31495 return len;
31496 }
31497 return -EINVAL;
31498 @@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev)
31499 rdev->sb_loaded = 0;
31500 rdev->bb_page = NULL;
31501 atomic_set(&rdev->nr_pending, 0);
31502 - atomic_set(&rdev->read_errors, 0);
31503 - atomic_set(&rdev->corrected_errors, 0);
31504 + atomic_set_unchecked(&rdev->read_errors, 0);
31505 + atomic_set_unchecked(&rdev->corrected_errors, 0);
31506
31507 INIT_LIST_HEAD(&rdev->same_set);
31508 init_waitqueue_head(&rdev->blocked_wait);
31509 @@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31510
31511 spin_unlock(&pers_lock);
31512 seq_printf(seq, "\n");
31513 - seq->poll_event = atomic_read(&md_event_count);
31514 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31515 return 0;
31516 }
31517 if (v == (void*)2) {
31518 @@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31519 chunk_kb ? "KB" : "B");
31520 if (bitmap->file) {
31521 seq_printf(seq, ", file: ");
31522 - seq_path(seq, &bitmap->file->f_path, " \t\n");
31523 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31524 }
31525
31526 seq_printf(seq, "\n");
31527 @@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31528 return error;
31529
31530 seq = file->private_data;
31531 - seq->poll_event = atomic_read(&md_event_count);
31532 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31533 return error;
31534 }
31535
31536 @@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31537 /* always allow read */
31538 mask = POLLIN | POLLRDNORM;
31539
31540 - if (seq->poll_event != atomic_read(&md_event_count))
31541 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31542 mask |= POLLERR | POLLPRI;
31543 return mask;
31544 }
31545 @@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
31546 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31547 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31548 (int)part_stat_read(&disk->part0, sectors[1]) -
31549 - atomic_read(&disk->sync_io);
31550 + atomic_read_unchecked(&disk->sync_io);
31551 /* sync IO will cause sync_io to increase before the disk_stats
31552 * as sync_io is counted when a request starts, and
31553 * disk_stats is counted when it completes.
31554 diff --git a/drivers/md/md.h b/drivers/md/md.h
31555 index cf742d9..7c7c745 100644
31556 --- a/drivers/md/md.h
31557 +++ b/drivers/md/md.h
31558 @@ -120,13 +120,13 @@ struct md_rdev {
31559 * only maintained for arrays that
31560 * support hot removal
31561 */
31562 - atomic_t read_errors; /* number of consecutive read errors that
31563 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
31564 * we have tried to ignore.
31565 */
31566 struct timespec last_read_error; /* monotonic time since our
31567 * last read error
31568 */
31569 - atomic_t corrected_errors; /* number of corrected read errors,
31570 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31571 * for reporting to userspace and storing
31572 * in superblock.
31573 */
31574 @@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
31575
31576 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31577 {
31578 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31579 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31580 }
31581
31582 struct md_personality
31583 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
31584 index 50ed53b..4f29d7d 100644
31585 --- a/drivers/md/persistent-data/dm-space-map-checker.c
31586 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
31587 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
31588 /*----------------------------------------------------------------*/
31589
31590 struct sm_checker {
31591 - struct dm_space_map sm;
31592 + dm_space_map_no_const sm;
31593
31594 struct count_array old_counts;
31595 struct count_array counts;
31596 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
31597 index fc469ba..2d91555 100644
31598 --- a/drivers/md/persistent-data/dm-space-map-disk.c
31599 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
31600 @@ -23,7 +23,7 @@
31601 * Space map interface.
31602 */
31603 struct sm_disk {
31604 - struct dm_space_map sm;
31605 + dm_space_map_no_const sm;
31606
31607 struct ll_disk ll;
31608 struct ll_disk old_ll;
31609 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
31610 index e89ae5e..062e4c2 100644
31611 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
31612 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
31613 @@ -43,7 +43,7 @@ struct block_op {
31614 };
31615
31616 struct sm_metadata {
31617 - struct dm_space_map sm;
31618 + dm_space_map_no_const sm;
31619
31620 struct ll_disk ll;
31621 struct ll_disk old_ll;
31622 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
31623 index 1cbfc6b..56e1dbb 100644
31624 --- a/drivers/md/persistent-data/dm-space-map.h
31625 +++ b/drivers/md/persistent-data/dm-space-map.h
31626 @@ -60,6 +60,7 @@ struct dm_space_map {
31627 int (*root_size)(struct dm_space_map *sm, size_t *result);
31628 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
31629 };
31630 +typedef struct dm_space_map __no_const dm_space_map_no_const;
31631
31632 /*----------------------------------------------------------------*/
31633
31634 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31635 index 7d9e071..015b1d5 100644
31636 --- a/drivers/md/raid1.c
31637 +++ b/drivers/md/raid1.c
31638 @@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
31639 if (r1_sync_page_io(rdev, sect, s,
31640 bio->bi_io_vec[idx].bv_page,
31641 READ) != 0)
31642 - atomic_add(s, &rdev->corrected_errors);
31643 + atomic_add_unchecked(s, &rdev->corrected_errors);
31644 }
31645 sectors -= s;
31646 sect += s;
31647 @@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
31648 test_bit(In_sync, &rdev->flags)) {
31649 if (r1_sync_page_io(rdev, sect, s,
31650 conf->tmppage, READ)) {
31651 - atomic_add(s, &rdev->corrected_errors);
31652 + atomic_add_unchecked(s, &rdev->corrected_errors);
31653 printk(KERN_INFO
31654 "md/raid1:%s: read error corrected "
31655 "(%d sectors at %llu on %s)\n",
31656 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31657 index 685ddf3..955b087 100644
31658 --- a/drivers/md/raid10.c
31659 +++ b/drivers/md/raid10.c
31660 @@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error)
31661 /* The write handler will notice the lack of
31662 * R10BIO_Uptodate and record any errors etc
31663 */
31664 - atomic_add(r10_bio->sectors,
31665 + atomic_add_unchecked(r10_bio->sectors,
31666 &conf->mirrors[d].rdev->corrected_errors);
31667
31668 /* for reconstruct, we always reschedule after a read.
31669 @@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31670 {
31671 struct timespec cur_time_mon;
31672 unsigned long hours_since_last;
31673 - unsigned int read_errors = atomic_read(&rdev->read_errors);
31674 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31675
31676 ktime_get_ts(&cur_time_mon);
31677
31678 @@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31679 * overflowing the shift of read_errors by hours_since_last.
31680 */
31681 if (hours_since_last >= 8 * sizeof(read_errors))
31682 - atomic_set(&rdev->read_errors, 0);
31683 + atomic_set_unchecked(&rdev->read_errors, 0);
31684 else
31685 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31686 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31687 }
31688
31689 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
31690 @@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31691 return;
31692
31693 check_decay_read_errors(mddev, rdev);
31694 - atomic_inc(&rdev->read_errors);
31695 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
31696 + atomic_inc_unchecked(&rdev->read_errors);
31697 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31698 char b[BDEVNAME_SIZE];
31699 bdevname(rdev->bdev, b);
31700
31701 @@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31702 "md/raid10:%s: %s: Raid device exceeded "
31703 "read_error threshold [cur %d:max %d]\n",
31704 mdname(mddev), b,
31705 - atomic_read(&rdev->read_errors), max_read_errors);
31706 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31707 printk(KERN_NOTICE
31708 "md/raid10:%s: %s: Failing raid device\n",
31709 mdname(mddev), b);
31710 @@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31711 (unsigned long long)(
31712 sect + rdev->data_offset),
31713 bdevname(rdev->bdev, b));
31714 - atomic_add(s, &rdev->corrected_errors);
31715 + atomic_add_unchecked(s, &rdev->corrected_errors);
31716 }
31717
31718 rdev_dec_pending(rdev, mddev);
31719 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31720 index 858fdbb..b2dac95 100644
31721 --- a/drivers/md/raid5.c
31722 +++ b/drivers/md/raid5.c
31723 @@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31724 (unsigned long long)(sh->sector
31725 + rdev->data_offset),
31726 bdevname(rdev->bdev, b));
31727 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31728 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31729 clear_bit(R5_ReadError, &sh->dev[i].flags);
31730 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31731 }
31732 - if (atomic_read(&conf->disks[i].rdev->read_errors))
31733 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
31734 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31735 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31736 } else {
31737 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31738 int retry = 0;
31739 rdev = conf->disks[i].rdev;
31740
31741 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31742 - atomic_inc(&rdev->read_errors);
31743 + atomic_inc_unchecked(&rdev->read_errors);
31744 if (conf->mddev->degraded >= conf->max_degraded)
31745 printk_ratelimited(
31746 KERN_WARNING
31747 @@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31748 (unsigned long long)(sh->sector
31749 + rdev->data_offset),
31750 bdn);
31751 - else if (atomic_read(&rdev->read_errors)
31752 + else if (atomic_read_unchecked(&rdev->read_errors)
31753 > conf->max_nr_stripes)
31754 printk(KERN_WARNING
31755 "md/raid:%s: Too many read errors, failing device %s.\n",
31756 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31757 index ba9a643..e474ab5 100644
31758 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31759 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31760 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
31761 .subvendor = _subvend, .subdevice = _subdev, \
31762 .driver_data = (unsigned long)&_driverdata }
31763
31764 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31765 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31766 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31767 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31768 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31769 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31770 index a7d876f..8c21b61 100644
31771 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
31772 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31773 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
31774 union {
31775 dmx_ts_cb ts;
31776 dmx_section_cb sec;
31777 - } cb;
31778 + } __no_const cb;
31779
31780 struct dvb_demux *demux;
31781 void *priv;
31782 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31783 index f732877..d38c35a 100644
31784 --- a/drivers/media/dvb/dvb-core/dvbdev.c
31785 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
31786 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31787 const struct dvb_device *template, void *priv, int type)
31788 {
31789 struct dvb_device *dvbdev;
31790 - struct file_operations *dvbdevfops;
31791 + file_operations_no_const *dvbdevfops;
31792 struct device *clsdev;
31793 int minor;
31794 int id;
31795 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31796 index 9f2a02c..5920f88 100644
31797 --- a/drivers/media/dvb/dvb-usb/cxusb.c
31798 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
31799 @@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31800 struct dib0700_adapter_state {
31801 int (*set_param_save) (struct dvb_frontend *,
31802 struct dvb_frontend_parameters *);
31803 -};
31804 +} __no_const;
31805
31806 static int dib7070_set_param_override(struct dvb_frontend *fe,
31807 struct dvb_frontend_parameters *fep)
31808 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31809 index f103ec1..5e8968b 100644
31810 --- a/drivers/media/dvb/dvb-usb/dw2102.c
31811 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
31812 @@ -95,7 +95,7 @@ struct su3000_state {
31813
31814 struct s6x0_state {
31815 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31816 -};
31817 +} __no_const;
31818
31819 /* debug */
31820 static int dvb_usb_dw2102_debug;
31821 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31822 index 404f63a..4796533 100644
31823 --- a/drivers/media/dvb/frontends/dib3000.h
31824 +++ b/drivers/media/dvb/frontends/dib3000.h
31825 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31826 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31827 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31828 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31829 -};
31830 +} __no_const;
31831
31832 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31833 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31834 diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
31835 index 90bf573..e8463da 100644
31836 --- a/drivers/media/dvb/frontends/ds3000.c
31837 +++ b/drivers/media/dvb/frontends/ds3000.c
31838 @@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
31839
31840 for (i = 0; i < 30 ; i++) {
31841 ds3000_read_status(fe, &status);
31842 - if (status && FE_HAS_LOCK)
31843 + if (status & FE_HAS_LOCK)
31844 break;
31845
31846 msleep(10);
31847 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31848 index 0564192..75b16f5 100644
31849 --- a/drivers/media/dvb/ngene/ngene-cards.c
31850 +++ b/drivers/media/dvb/ngene/ngene-cards.c
31851 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31852
31853 /****************************************************************************/
31854
31855 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31856 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31857 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31858 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31859 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31860 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31861 index 16a089f..ab1667d 100644
31862 --- a/drivers/media/radio/radio-cadet.c
31863 +++ b/drivers/media/radio/radio-cadet.c
31864 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31865 unsigned char readbuf[RDS_BUFFER];
31866 int i = 0;
31867
31868 + if (count > RDS_BUFFER)
31869 + return -EFAULT;
31870 mutex_lock(&dev->lock);
31871 if (dev->rdsstat == 0) {
31872 dev->rdsstat = 1;
31873 diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
31874 index 61287fc..8b08712 100644
31875 --- a/drivers/media/rc/redrat3.c
31876 +++ b/drivers/media/rc/redrat3.c
31877 @@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
31878 return carrier;
31879 }
31880
31881 -static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
31882 +static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
31883 {
31884 struct redrat3_dev *rr3 = rcdev->priv;
31885 struct device *dev = rr3->dev;
31886 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31887 index 9cde353..8c6a1c3 100644
31888 --- a/drivers/media/video/au0828/au0828.h
31889 +++ b/drivers/media/video/au0828/au0828.h
31890 @@ -191,7 +191,7 @@ struct au0828_dev {
31891
31892 /* I2C */
31893 struct i2c_adapter i2c_adap;
31894 - struct i2c_algorithm i2c_algo;
31895 + i2c_algorithm_no_const i2c_algo;
31896 struct i2c_client i2c_client;
31897 u32 i2c_rc;
31898
31899 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31900 index 68d1240..46b32eb 100644
31901 --- a/drivers/media/video/cx88/cx88-alsa.c
31902 +++ b/drivers/media/video/cx88/cx88-alsa.c
31903 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31904 * Only boards with eeprom and byte 1 at eeprom=1 have it
31905 */
31906
31907 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31908 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31909 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31910 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31911 {0, }
31912 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31913 index 305e6aa..0143317 100644
31914 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31915 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31916 @@ -196,7 +196,7 @@ struct pvr2_hdw {
31917
31918 /* I2C stuff */
31919 struct i2c_adapter i2c_adap;
31920 - struct i2c_algorithm i2c_algo;
31921 + i2c_algorithm_no_const i2c_algo;
31922 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31923 int i2c_cx25840_hack_state;
31924 int i2c_linked;
31925 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31926 index a0895bf..b7ebb1b 100644
31927 --- a/drivers/media/video/timblogiw.c
31928 +++ b/drivers/media/video/timblogiw.c
31929 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31930
31931 /* Platform device functions */
31932
31933 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31934 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31935 .vidioc_querycap = timblogiw_querycap,
31936 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31937 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31938 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31939 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31940 };
31941
31942 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31943 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31944 .owner = THIS_MODULE,
31945 .open = timblogiw_open,
31946 .release = timblogiw_close,
31947 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31948 index e9c6a60..daf6a33 100644
31949 --- a/drivers/message/fusion/mptbase.c
31950 +++ b/drivers/message/fusion/mptbase.c
31951 @@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
31952 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31953 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31954
31955 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31956 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
31957 +#else
31958 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31959 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31960 +#endif
31961 +
31962 /*
31963 * Rounding UP to nearest 4-kB boundary here...
31964 */
31965 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31966 index 9d95042..b808101 100644
31967 --- a/drivers/message/fusion/mptsas.c
31968 +++ b/drivers/message/fusion/mptsas.c
31969 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
31970 return 0;
31971 }
31972
31973 +static inline void
31974 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31975 +{
31976 + if (phy_info->port_details) {
31977 + phy_info->port_details->rphy = rphy;
31978 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31979 + ioc->name, rphy));
31980 + }
31981 +
31982 + if (rphy) {
31983 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31984 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31985 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31986 + ioc->name, rphy, rphy->dev.release));
31987 + }
31988 +}
31989 +
31990 /* no mutex */
31991 static void
31992 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31993 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
31994 return NULL;
31995 }
31996
31997 -static inline void
31998 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31999 -{
32000 - if (phy_info->port_details) {
32001 - phy_info->port_details->rphy = rphy;
32002 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32003 - ioc->name, rphy));
32004 - }
32005 -
32006 - if (rphy) {
32007 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32008 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32009 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32010 - ioc->name, rphy, rphy->dev.release));
32011 - }
32012 -}
32013 -
32014 static inline struct sas_port *
32015 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32016 {
32017 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
32018 index 0c3ced7..1fe34ec 100644
32019 --- a/drivers/message/fusion/mptscsih.c
32020 +++ b/drivers/message/fusion/mptscsih.c
32021 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32022
32023 h = shost_priv(SChost);
32024
32025 - if (h) {
32026 - if (h->info_kbuf == NULL)
32027 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32028 - return h->info_kbuf;
32029 - h->info_kbuf[0] = '\0';
32030 + if (!h)
32031 + return NULL;
32032
32033 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32034 - h->info_kbuf[size-1] = '\0';
32035 - }
32036 + if (h->info_kbuf == NULL)
32037 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32038 + return h->info_kbuf;
32039 + h->info_kbuf[0] = '\0';
32040 +
32041 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32042 + h->info_kbuf[size-1] = '\0';
32043
32044 return h->info_kbuf;
32045 }
32046 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
32047 index 07dbeaf..5533142 100644
32048 --- a/drivers/message/i2o/i2o_proc.c
32049 +++ b/drivers/message/i2o/i2o_proc.c
32050 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
32051 "Array Controller Device"
32052 };
32053
32054 -static char *chtostr(u8 * chars, int n)
32055 -{
32056 - char tmp[256];
32057 - tmp[0] = 0;
32058 - return strncat(tmp, (char *)chars, n);
32059 -}
32060 -
32061 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32062 char *group)
32063 {
32064 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
32065
32066 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32067 seq_printf(seq, "%-#8x", ddm_table.module_id);
32068 - seq_printf(seq, "%-29s",
32069 - chtostr(ddm_table.module_name_version, 28));
32070 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32071 seq_printf(seq, "%9d ", ddm_table.data_size);
32072 seq_printf(seq, "%8d", ddm_table.code_size);
32073
32074 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
32075
32076 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32077 seq_printf(seq, "%-#8x", dst->module_id);
32078 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32079 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32080 + seq_printf(seq, "%-.28s", dst->module_name_version);
32081 + seq_printf(seq, "%-.8s", dst->date);
32082 seq_printf(seq, "%8d ", dst->module_size);
32083 seq_printf(seq, "%8d ", dst->mpb_size);
32084 seq_printf(seq, "0x%04x", dst->module_flags);
32085 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
32086 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32087 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32088 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32089 - seq_printf(seq, "Vendor info : %s\n",
32090 - chtostr((u8 *) (work32 + 2), 16));
32091 - seq_printf(seq, "Product info : %s\n",
32092 - chtostr((u8 *) (work32 + 6), 16));
32093 - seq_printf(seq, "Description : %s\n",
32094 - chtostr((u8 *) (work32 + 10), 16));
32095 - seq_printf(seq, "Product rev. : %s\n",
32096 - chtostr((u8 *) (work32 + 14), 8));
32097 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32098 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32099 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32100 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32101
32102 seq_printf(seq, "Serial number : ");
32103 print_serial_number(seq, (u8 *) (work32 + 16),
32104 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
32105 }
32106
32107 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32108 - seq_printf(seq, "Module name : %s\n",
32109 - chtostr(result.module_name, 24));
32110 - seq_printf(seq, "Module revision : %s\n",
32111 - chtostr(result.module_rev, 8));
32112 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
32113 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32114
32115 seq_printf(seq, "Serial number : ");
32116 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32117 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
32118 return 0;
32119 }
32120
32121 - seq_printf(seq, "Device name : %s\n",
32122 - chtostr(result.device_name, 64));
32123 - seq_printf(seq, "Service name : %s\n",
32124 - chtostr(result.service_name, 64));
32125 - seq_printf(seq, "Physical name : %s\n",
32126 - chtostr(result.physical_location, 64));
32127 - seq_printf(seq, "Instance number : %s\n",
32128 - chtostr(result.instance_number, 4));
32129 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
32130 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
32131 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32132 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32133
32134 return 0;
32135 }
32136 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
32137 index a8c08f3..155fe3d 100644
32138 --- a/drivers/message/i2o/iop.c
32139 +++ b/drivers/message/i2o/iop.c
32140 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
32141
32142 spin_lock_irqsave(&c->context_list_lock, flags);
32143
32144 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32145 - atomic_inc(&c->context_list_counter);
32146 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32147 + atomic_inc_unchecked(&c->context_list_counter);
32148
32149 - entry->context = atomic_read(&c->context_list_counter);
32150 + entry->context = atomic_read_unchecked(&c->context_list_counter);
32151
32152 list_add(&entry->list, &c->context_list);
32153
32154 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
32155
32156 #if BITS_PER_LONG == 64
32157 spin_lock_init(&c->context_list_lock);
32158 - atomic_set(&c->context_list_counter, 0);
32159 + atomic_set_unchecked(&c->context_list_counter, 0);
32160 INIT_LIST_HEAD(&c->context_list);
32161 #endif
32162
32163 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
32164 index 7ce65f4..e66e9bc 100644
32165 --- a/drivers/mfd/abx500-core.c
32166 +++ b/drivers/mfd/abx500-core.c
32167 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
32168
32169 struct abx500_device_entry {
32170 struct list_head list;
32171 - struct abx500_ops ops;
32172 + abx500_ops_no_const ops;
32173 struct device *dev;
32174 };
32175
32176 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
32177 index 5c2a06a..8fa077c 100644
32178 --- a/drivers/mfd/janz-cmodio.c
32179 +++ b/drivers/mfd/janz-cmodio.c
32180 @@ -13,6 +13,7 @@
32181
32182 #include <linux/kernel.h>
32183 #include <linux/module.h>
32184 +#include <linux/slab.h>
32185 #include <linux/init.h>
32186 #include <linux/pci.h>
32187 #include <linux/interrupt.h>
32188 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
32189 index 29d12a7..f900ba4 100644
32190 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
32191 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
32192 @@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
32193 * the lid is closed. This leads to interrupts as soon as a little move
32194 * is done.
32195 */
32196 - atomic_inc(&lis3->count);
32197 + atomic_inc_unchecked(&lis3->count);
32198
32199 wake_up_interruptible(&lis3->misc_wait);
32200 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
32201 @@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
32202 if (lis3->pm_dev)
32203 pm_runtime_get_sync(lis3->pm_dev);
32204
32205 - atomic_set(&lis3->count, 0);
32206 + atomic_set_unchecked(&lis3->count, 0);
32207 return 0;
32208 }
32209
32210 @@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
32211 add_wait_queue(&lis3->misc_wait, &wait);
32212 while (true) {
32213 set_current_state(TASK_INTERRUPTIBLE);
32214 - data = atomic_xchg(&lis3->count, 0);
32215 + data = atomic_xchg_unchecked(&lis3->count, 0);
32216 if (data)
32217 break;
32218
32219 @@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
32220 struct lis3lv02d, miscdev);
32221
32222 poll_wait(file, &lis3->misc_wait, wait);
32223 - if (atomic_read(&lis3->count))
32224 + if (atomic_read_unchecked(&lis3->count))
32225 return POLLIN | POLLRDNORM;
32226 return 0;
32227 }
32228 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
32229 index 2b1482a..5d33616 100644
32230 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
32231 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
32232 @@ -266,7 +266,7 @@ struct lis3lv02d {
32233 struct input_polled_dev *idev; /* input device */
32234 struct platform_device *pdev; /* platform device */
32235 struct regulator_bulk_data regulators[2];
32236 - atomic_t count; /* interrupt count after last read */
32237 + atomic_unchecked_t count; /* interrupt count after last read */
32238 union axis_conversion ac; /* hw -> logical axis */
32239 int mapped_btns[3];
32240
32241 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
32242 index 2f30bad..c4c13d0 100644
32243 --- a/drivers/misc/sgi-gru/gruhandles.c
32244 +++ b/drivers/misc/sgi-gru/gruhandles.c
32245 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32246 unsigned long nsec;
32247
32248 nsec = CLKS2NSEC(clks);
32249 - atomic_long_inc(&mcs_op_statistics[op].count);
32250 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
32251 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32252 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
32253 if (mcs_op_statistics[op].max < nsec)
32254 mcs_op_statistics[op].max = nsec;
32255 }
32256 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
32257 index 7768b87..f8aac38 100644
32258 --- a/drivers/misc/sgi-gru/gruprocfs.c
32259 +++ b/drivers/misc/sgi-gru/gruprocfs.c
32260 @@ -32,9 +32,9 @@
32261
32262 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32263
32264 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32265 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32266 {
32267 - unsigned long val = atomic_long_read(v);
32268 + unsigned long val = atomic_long_read_unchecked(v);
32269
32270 seq_printf(s, "%16lu %s\n", val, id);
32271 }
32272 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
32273
32274 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
32275 for (op = 0; op < mcsop_last; op++) {
32276 - count = atomic_long_read(&mcs_op_statistics[op].count);
32277 - total = atomic_long_read(&mcs_op_statistics[op].total);
32278 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32279 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32280 max = mcs_op_statistics[op].max;
32281 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32282 count ? total / count : 0, max);
32283 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
32284 index 5c3ce24..4915ccb 100644
32285 --- a/drivers/misc/sgi-gru/grutables.h
32286 +++ b/drivers/misc/sgi-gru/grutables.h
32287 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
32288 * GRU statistics.
32289 */
32290 struct gru_stats_s {
32291 - atomic_long_t vdata_alloc;
32292 - atomic_long_t vdata_free;
32293 - atomic_long_t gts_alloc;
32294 - atomic_long_t gts_free;
32295 - atomic_long_t gms_alloc;
32296 - atomic_long_t gms_free;
32297 - atomic_long_t gts_double_allocate;
32298 - atomic_long_t assign_context;
32299 - atomic_long_t assign_context_failed;
32300 - atomic_long_t free_context;
32301 - atomic_long_t load_user_context;
32302 - atomic_long_t load_kernel_context;
32303 - atomic_long_t lock_kernel_context;
32304 - atomic_long_t unlock_kernel_context;
32305 - atomic_long_t steal_user_context;
32306 - atomic_long_t steal_kernel_context;
32307 - atomic_long_t steal_context_failed;
32308 - atomic_long_t nopfn;
32309 - atomic_long_t asid_new;
32310 - atomic_long_t asid_next;
32311 - atomic_long_t asid_wrap;
32312 - atomic_long_t asid_reuse;
32313 - atomic_long_t intr;
32314 - atomic_long_t intr_cbr;
32315 - atomic_long_t intr_tfh;
32316 - atomic_long_t intr_spurious;
32317 - atomic_long_t intr_mm_lock_failed;
32318 - atomic_long_t call_os;
32319 - atomic_long_t call_os_wait_queue;
32320 - atomic_long_t user_flush_tlb;
32321 - atomic_long_t user_unload_context;
32322 - atomic_long_t user_exception;
32323 - atomic_long_t set_context_option;
32324 - atomic_long_t check_context_retarget_intr;
32325 - atomic_long_t check_context_unload;
32326 - atomic_long_t tlb_dropin;
32327 - atomic_long_t tlb_preload_page;
32328 - atomic_long_t tlb_dropin_fail_no_asid;
32329 - atomic_long_t tlb_dropin_fail_upm;
32330 - atomic_long_t tlb_dropin_fail_invalid;
32331 - atomic_long_t tlb_dropin_fail_range_active;
32332 - atomic_long_t tlb_dropin_fail_idle;
32333 - atomic_long_t tlb_dropin_fail_fmm;
32334 - atomic_long_t tlb_dropin_fail_no_exception;
32335 - atomic_long_t tfh_stale_on_fault;
32336 - atomic_long_t mmu_invalidate_range;
32337 - atomic_long_t mmu_invalidate_page;
32338 - atomic_long_t flush_tlb;
32339 - atomic_long_t flush_tlb_gru;
32340 - atomic_long_t flush_tlb_gru_tgh;
32341 - atomic_long_t flush_tlb_gru_zero_asid;
32342 + atomic_long_unchecked_t vdata_alloc;
32343 + atomic_long_unchecked_t vdata_free;
32344 + atomic_long_unchecked_t gts_alloc;
32345 + atomic_long_unchecked_t gts_free;
32346 + atomic_long_unchecked_t gms_alloc;
32347 + atomic_long_unchecked_t gms_free;
32348 + atomic_long_unchecked_t gts_double_allocate;
32349 + atomic_long_unchecked_t assign_context;
32350 + atomic_long_unchecked_t assign_context_failed;
32351 + atomic_long_unchecked_t free_context;
32352 + atomic_long_unchecked_t load_user_context;
32353 + atomic_long_unchecked_t load_kernel_context;
32354 + atomic_long_unchecked_t lock_kernel_context;
32355 + atomic_long_unchecked_t unlock_kernel_context;
32356 + atomic_long_unchecked_t steal_user_context;
32357 + atomic_long_unchecked_t steal_kernel_context;
32358 + atomic_long_unchecked_t steal_context_failed;
32359 + atomic_long_unchecked_t nopfn;
32360 + atomic_long_unchecked_t asid_new;
32361 + atomic_long_unchecked_t asid_next;
32362 + atomic_long_unchecked_t asid_wrap;
32363 + atomic_long_unchecked_t asid_reuse;
32364 + atomic_long_unchecked_t intr;
32365 + atomic_long_unchecked_t intr_cbr;
32366 + atomic_long_unchecked_t intr_tfh;
32367 + atomic_long_unchecked_t intr_spurious;
32368 + atomic_long_unchecked_t intr_mm_lock_failed;
32369 + atomic_long_unchecked_t call_os;
32370 + atomic_long_unchecked_t call_os_wait_queue;
32371 + atomic_long_unchecked_t user_flush_tlb;
32372 + atomic_long_unchecked_t user_unload_context;
32373 + atomic_long_unchecked_t user_exception;
32374 + atomic_long_unchecked_t set_context_option;
32375 + atomic_long_unchecked_t check_context_retarget_intr;
32376 + atomic_long_unchecked_t check_context_unload;
32377 + atomic_long_unchecked_t tlb_dropin;
32378 + atomic_long_unchecked_t tlb_preload_page;
32379 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32380 + atomic_long_unchecked_t tlb_dropin_fail_upm;
32381 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
32382 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
32383 + atomic_long_unchecked_t tlb_dropin_fail_idle;
32384 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
32385 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32386 + atomic_long_unchecked_t tfh_stale_on_fault;
32387 + atomic_long_unchecked_t mmu_invalidate_range;
32388 + atomic_long_unchecked_t mmu_invalidate_page;
32389 + atomic_long_unchecked_t flush_tlb;
32390 + atomic_long_unchecked_t flush_tlb_gru;
32391 + atomic_long_unchecked_t flush_tlb_gru_tgh;
32392 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32393
32394 - atomic_long_t copy_gpa;
32395 - atomic_long_t read_gpa;
32396 + atomic_long_unchecked_t copy_gpa;
32397 + atomic_long_unchecked_t read_gpa;
32398
32399 - atomic_long_t mesq_receive;
32400 - atomic_long_t mesq_receive_none;
32401 - atomic_long_t mesq_send;
32402 - atomic_long_t mesq_send_failed;
32403 - atomic_long_t mesq_noop;
32404 - atomic_long_t mesq_send_unexpected_error;
32405 - atomic_long_t mesq_send_lb_overflow;
32406 - atomic_long_t mesq_send_qlimit_reached;
32407 - atomic_long_t mesq_send_amo_nacked;
32408 - atomic_long_t mesq_send_put_nacked;
32409 - atomic_long_t mesq_page_overflow;
32410 - atomic_long_t mesq_qf_locked;
32411 - atomic_long_t mesq_qf_noop_not_full;
32412 - atomic_long_t mesq_qf_switch_head_failed;
32413 - atomic_long_t mesq_qf_unexpected_error;
32414 - atomic_long_t mesq_noop_unexpected_error;
32415 - atomic_long_t mesq_noop_lb_overflow;
32416 - atomic_long_t mesq_noop_qlimit_reached;
32417 - atomic_long_t mesq_noop_amo_nacked;
32418 - atomic_long_t mesq_noop_put_nacked;
32419 - atomic_long_t mesq_noop_page_overflow;
32420 + atomic_long_unchecked_t mesq_receive;
32421 + atomic_long_unchecked_t mesq_receive_none;
32422 + atomic_long_unchecked_t mesq_send;
32423 + atomic_long_unchecked_t mesq_send_failed;
32424 + atomic_long_unchecked_t mesq_noop;
32425 + atomic_long_unchecked_t mesq_send_unexpected_error;
32426 + atomic_long_unchecked_t mesq_send_lb_overflow;
32427 + atomic_long_unchecked_t mesq_send_qlimit_reached;
32428 + atomic_long_unchecked_t mesq_send_amo_nacked;
32429 + atomic_long_unchecked_t mesq_send_put_nacked;
32430 + atomic_long_unchecked_t mesq_page_overflow;
32431 + atomic_long_unchecked_t mesq_qf_locked;
32432 + atomic_long_unchecked_t mesq_qf_noop_not_full;
32433 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
32434 + atomic_long_unchecked_t mesq_qf_unexpected_error;
32435 + atomic_long_unchecked_t mesq_noop_unexpected_error;
32436 + atomic_long_unchecked_t mesq_noop_lb_overflow;
32437 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
32438 + atomic_long_unchecked_t mesq_noop_amo_nacked;
32439 + atomic_long_unchecked_t mesq_noop_put_nacked;
32440 + atomic_long_unchecked_t mesq_noop_page_overflow;
32441
32442 };
32443
32444 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32445 tghop_invalidate, mcsop_last};
32446
32447 struct mcs_op_statistic {
32448 - atomic_long_t count;
32449 - atomic_long_t total;
32450 + atomic_long_unchecked_t count;
32451 + atomic_long_unchecked_t total;
32452 unsigned long max;
32453 };
32454
32455 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32456
32457 #define STAT(id) do { \
32458 if (gru_options & OPT_STATS) \
32459 - atomic_long_inc(&gru_stats.id); \
32460 + atomic_long_inc_unchecked(&gru_stats.id); \
32461 } while (0)
32462
32463 #ifdef CONFIG_SGI_GRU_DEBUG
32464 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32465 index 851b2f2..a4ec097 100644
32466 --- a/drivers/misc/sgi-xp/xp.h
32467 +++ b/drivers/misc/sgi-xp/xp.h
32468 @@ -289,7 +289,7 @@ struct xpc_interface {
32469 xpc_notify_func, void *);
32470 void (*received) (short, int, void *);
32471 enum xp_retval (*partid_to_nasids) (short, void *);
32472 -};
32473 +} __no_const;
32474
32475 extern struct xpc_interface xpc_interface;
32476
32477 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32478 index b94d5f7..7f494c5 100644
32479 --- a/drivers/misc/sgi-xp/xpc.h
32480 +++ b/drivers/misc/sgi-xp/xpc.h
32481 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
32482 void (*received_payload) (struct xpc_channel *, void *);
32483 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32484 };
32485 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32486
32487 /* struct xpc_partition act_state values (for XPC HB) */
32488
32489 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32490 /* found in xpc_main.c */
32491 extern struct device *xpc_part;
32492 extern struct device *xpc_chan;
32493 -extern struct xpc_arch_operations xpc_arch_ops;
32494 +extern xpc_arch_operations_no_const xpc_arch_ops;
32495 extern int xpc_disengage_timelimit;
32496 extern int xpc_disengage_timedout;
32497 extern int xpc_activate_IRQ_rcvd;
32498 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32499 index 8d082b4..aa749ae 100644
32500 --- a/drivers/misc/sgi-xp/xpc_main.c
32501 +++ b/drivers/misc/sgi-xp/xpc_main.c
32502 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32503 .notifier_call = xpc_system_die,
32504 };
32505
32506 -struct xpc_arch_operations xpc_arch_ops;
32507 +xpc_arch_operations_no_const xpc_arch_ops;
32508
32509 /*
32510 * Timer function to enforce the timelimit on the partition disengage.
32511 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32512 index 6878a94..fe5c5f1 100644
32513 --- a/drivers/mmc/host/sdhci-pci.c
32514 +++ b/drivers/mmc/host/sdhci-pci.c
32515 @@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32516 .probe = via_probe,
32517 };
32518
32519 -static const struct pci_device_id pci_ids[] __devinitdata = {
32520 +static const struct pci_device_id pci_ids[] __devinitconst = {
32521 {
32522 .vendor = PCI_VENDOR_ID_RICOH,
32523 .device = PCI_DEVICE_ID_RICOH_R5C822,
32524 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32525 index e9fad91..0a7a16a 100644
32526 --- a/drivers/mtd/devices/doc2000.c
32527 +++ b/drivers/mtd/devices/doc2000.c
32528 @@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32529
32530 /* The ECC will not be calculated correctly if less than 512 is written */
32531 /* DBB-
32532 - if (len != 0x200 && eccbuf)
32533 + if (len != 0x200)
32534 printk(KERN_WARNING
32535 "ECC needs a full sector write (adr: %lx size %lx)\n",
32536 (long) to, (long) len);
32537 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32538 index a3f7a27..234016e 100644
32539 --- a/drivers/mtd/devices/doc2001.c
32540 +++ b/drivers/mtd/devices/doc2001.c
32541 @@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32542 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32543
32544 /* Don't allow read past end of device */
32545 - if (from >= this->totlen)
32546 + if (from >= this->totlen || !len)
32547 return -EINVAL;
32548
32549 /* Don't allow a single read to cross a 512-byte block boundary */
32550 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32551 index 3984d48..28aa897 100644
32552 --- a/drivers/mtd/nand/denali.c
32553 +++ b/drivers/mtd/nand/denali.c
32554 @@ -26,6 +26,7 @@
32555 #include <linux/pci.h>
32556 #include <linux/mtd/mtd.h>
32557 #include <linux/module.h>
32558 +#include <linux/slab.h>
32559
32560 #include "denali.h"
32561
32562 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32563 index ac40925..483b753 100644
32564 --- a/drivers/mtd/nftlmount.c
32565 +++ b/drivers/mtd/nftlmount.c
32566 @@ -24,6 +24,7 @@
32567 #include <asm/errno.h>
32568 #include <linux/delay.h>
32569 #include <linux/slab.h>
32570 +#include <linux/sched.h>
32571 #include <linux/mtd/mtd.h>
32572 #include <linux/mtd/nand.h>
32573 #include <linux/mtd/nftl.h>
32574 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32575 index 6c3fb5a..c542a81 100644
32576 --- a/drivers/mtd/ubi/build.c
32577 +++ b/drivers/mtd/ubi/build.c
32578 @@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32579 static int __init bytes_str_to_int(const char *str)
32580 {
32581 char *endp;
32582 - unsigned long result;
32583 + unsigned long result, scale = 1;
32584
32585 result = simple_strtoul(str, &endp, 0);
32586 if (str == endp || result >= INT_MAX) {
32587 @@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32588
32589 switch (*endp) {
32590 case 'G':
32591 - result *= 1024;
32592 + scale *= 1024;
32593 case 'M':
32594 - result *= 1024;
32595 + scale *= 1024;
32596 case 'K':
32597 - result *= 1024;
32598 + scale *= 1024;
32599 if (endp[1] == 'i' && endp[2] == 'B')
32600 endp += 2;
32601 case '\0':
32602 @@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32603 return -EINVAL;
32604 }
32605
32606 - return result;
32607 + if ((intoverflow_t)result*scale >= INT_MAX) {
32608 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32609 + str);
32610 + return -EINVAL;
32611 + }
32612 +
32613 + return result*scale;
32614 }
32615
32616 /**
32617 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
32618 index 1feae59..c2a61d2 100644
32619 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
32620 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
32621 @@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32622 */
32623
32624 #define ATL2_PARAM(X, desc) \
32625 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32626 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32627 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32628 MODULE_PARM_DESC(X, desc);
32629 #else
32630 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32631 index 9a517c2..a50cfcb 100644
32632 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32633 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32634 @@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32635
32636 int (*wait_comp)(struct bnx2x *bp,
32637 struct bnx2x_rx_mode_ramrod_params *p);
32638 -};
32639 +} __no_const;
32640
32641 /********************** Set multicast group ***********************************/
32642
32643 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
32644 index 94b4bd0..73c02de 100644
32645 --- a/drivers/net/ethernet/broadcom/tg3.h
32646 +++ b/drivers/net/ethernet/broadcom/tg3.h
32647 @@ -134,6 +134,7 @@
32648 #define CHIPREV_ID_5750_A0 0x4000
32649 #define CHIPREV_ID_5750_A1 0x4001
32650 #define CHIPREV_ID_5750_A3 0x4003
32651 +#define CHIPREV_ID_5750_C1 0x4201
32652 #define CHIPREV_ID_5750_C2 0x4202
32653 #define CHIPREV_ID_5752_A0_HW 0x5000
32654 #define CHIPREV_ID_5752_A0 0x6000
32655 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32656 index c5f5479..2e8c260 100644
32657 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32658 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32659 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
32660 */
32661 struct l2t_skb_cb {
32662 arp_failure_handler_func arp_failure_handler;
32663 -};
32664 +} __no_const;
32665
32666 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
32667
32668 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
32669 index 871bcaa..4043505 100644
32670 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
32671 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
32672 @@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32673 for (i=0; i<ETH_ALEN; i++) {
32674 tmp.addr[i] = dev->dev_addr[i];
32675 }
32676 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32677 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32678 break;
32679
32680 case DE4X5_SET_HWADDR: /* Set the hardware address */
32681 @@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32682 spin_lock_irqsave(&lp->lock, flags);
32683 memcpy(&statbuf, &lp->pktStats, ioc->len);
32684 spin_unlock_irqrestore(&lp->lock, flags);
32685 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32686 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32687 return -EFAULT;
32688 break;
32689 }
32690 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
32691 index 14d5b61..1398636 100644
32692 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
32693 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
32694 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
32695 {NULL}};
32696
32697
32698 -static const char *block_name[] __devinitdata = {
32699 +static const char *block_name[] __devinitconst = {
32700 "21140 non-MII",
32701 "21140 MII PHY",
32702 "21142 Serial PHY",
32703 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
32704 index 4d01219..b58d26d 100644
32705 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
32706 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
32707 @@ -236,7 +236,7 @@ struct pci_id_info {
32708 int drv_flags; /* Driver use, intended as capability flags. */
32709 };
32710
32711 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32712 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32713 { /* Sometime a Level-One switch card. */
32714 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32715 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32716 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
32717 index dcd7f7a..ecb7fb3 100644
32718 --- a/drivers/net/ethernet/dlink/sundance.c
32719 +++ b/drivers/net/ethernet/dlink/sundance.c
32720 @@ -218,7 +218,7 @@ enum {
32721 struct pci_id_info {
32722 const char *name;
32723 };
32724 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32725 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32726 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32727 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32728 {"D-Link DFE-580TX 4 port Server Adapter"},
32729 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
32730 index bf266a0..e024af7 100644
32731 --- a/drivers/net/ethernet/emulex/benet/be_main.c
32732 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
32733 @@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
32734
32735 if (wrapped)
32736 newacc += 65536;
32737 - ACCESS_ONCE(*acc) = newacc;
32738 + ACCESS_ONCE_RW(*acc) = newacc;
32739 }
32740
32741 void be_parse_stats(struct be_adapter *adapter)
32742 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
32743 index 61d2bdd..7f1154a 100644
32744 --- a/drivers/net/ethernet/fealnx.c
32745 +++ b/drivers/net/ethernet/fealnx.c
32746 @@ -150,7 +150,7 @@ struct chip_info {
32747 int flags;
32748 };
32749
32750 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
32751 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
32752 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32753 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
32754 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32755 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32756 index e1159e5..e18684d 100644
32757 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32758 +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32759 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
32760 {
32761 struct e1000_hw *hw = &adapter->hw;
32762 struct e1000_mac_info *mac = &hw->mac;
32763 - struct e1000_mac_operations *func = &mac->ops;
32764 + e1000_mac_operations_no_const *func = &mac->ops;
32765
32766 /* Set media type */
32767 switch (adapter->pdev->device) {
32768 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
32769 index a3e65fd..f451444 100644
32770 --- a/drivers/net/ethernet/intel/e1000e/82571.c
32771 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
32772 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
32773 {
32774 struct e1000_hw *hw = &adapter->hw;
32775 struct e1000_mac_info *mac = &hw->mac;
32776 - struct e1000_mac_operations *func = &mac->ops;
32777 + e1000_mac_operations_no_const *func = &mac->ops;
32778 u32 swsm = 0;
32779 u32 swsm2 = 0;
32780 bool force_clear_smbi = false;
32781 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
32782 index 2967039..ca8c40c 100644
32783 --- a/drivers/net/ethernet/intel/e1000e/hw.h
32784 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
32785 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
32786 void (*write_vfta)(struct e1000_hw *, u32, u32);
32787 s32 (*read_mac_addr)(struct e1000_hw *);
32788 };
32789 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32790
32791 /*
32792 * When to use various PHY register access functions:
32793 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
32794 void (*power_up)(struct e1000_hw *);
32795 void (*power_down)(struct e1000_hw *);
32796 };
32797 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32798
32799 /* Function pointers for the NVM. */
32800 struct e1000_nvm_operations {
32801 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
32802 s32 (*validate)(struct e1000_hw *);
32803 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32804 };
32805 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32806
32807 struct e1000_mac_info {
32808 - struct e1000_mac_operations ops;
32809 + e1000_mac_operations_no_const ops;
32810 u8 addr[ETH_ALEN];
32811 u8 perm_addr[ETH_ALEN];
32812
32813 @@ -872,7 +875,7 @@ struct e1000_mac_info {
32814 };
32815
32816 struct e1000_phy_info {
32817 - struct e1000_phy_operations ops;
32818 + e1000_phy_operations_no_const ops;
32819
32820 enum e1000_phy_type type;
32821
32822 @@ -906,7 +909,7 @@ struct e1000_phy_info {
32823 };
32824
32825 struct e1000_nvm_info {
32826 - struct e1000_nvm_operations ops;
32827 + e1000_nvm_operations_no_const ops;
32828
32829 enum e1000_nvm_type type;
32830 enum e1000_nvm_override override;
32831 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
32832 index 4519a13..f97fcd0 100644
32833 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
32834 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
32835 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
32836 s32 (*read_mac_addr)(struct e1000_hw *);
32837 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
32838 };
32839 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32840
32841 struct e1000_phy_operations {
32842 s32 (*acquire)(struct e1000_hw *);
32843 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
32844 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32845 s32 (*write_reg)(struct e1000_hw *, u32, u16);
32846 };
32847 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32848
32849 struct e1000_nvm_operations {
32850 s32 (*acquire)(struct e1000_hw *);
32851 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
32852 s32 (*update)(struct e1000_hw *);
32853 s32 (*validate)(struct e1000_hw *);
32854 };
32855 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32856
32857 struct e1000_info {
32858 s32 (*get_invariants)(struct e1000_hw *);
32859 @@ -350,7 +353,7 @@ struct e1000_info {
32860 extern const struct e1000_info e1000_82575_info;
32861
32862 struct e1000_mac_info {
32863 - struct e1000_mac_operations ops;
32864 + e1000_mac_operations_no_const ops;
32865
32866 u8 addr[6];
32867 u8 perm_addr[6];
32868 @@ -388,7 +391,7 @@ struct e1000_mac_info {
32869 };
32870
32871 struct e1000_phy_info {
32872 - struct e1000_phy_operations ops;
32873 + e1000_phy_operations_no_const ops;
32874
32875 enum e1000_phy_type type;
32876
32877 @@ -423,7 +426,7 @@ struct e1000_phy_info {
32878 };
32879
32880 struct e1000_nvm_info {
32881 - struct e1000_nvm_operations ops;
32882 + e1000_nvm_operations_no_const ops;
32883 enum e1000_nvm_type type;
32884 enum e1000_nvm_override override;
32885
32886 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
32887 s32 (*check_for_ack)(struct e1000_hw *, u16);
32888 s32 (*check_for_rst)(struct e1000_hw *, u16);
32889 };
32890 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32891
32892 struct e1000_mbx_stats {
32893 u32 msgs_tx;
32894 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
32895 };
32896
32897 struct e1000_mbx_info {
32898 - struct e1000_mbx_operations ops;
32899 + e1000_mbx_operations_no_const ops;
32900 struct e1000_mbx_stats stats;
32901 u32 timeout;
32902 u32 usec_delay;
32903 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
32904 index d7ed58f..64cde36 100644
32905 --- a/drivers/net/ethernet/intel/igbvf/vf.h
32906 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
32907 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
32908 s32 (*read_mac_addr)(struct e1000_hw *);
32909 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
32910 };
32911 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32912
32913 struct e1000_mac_info {
32914 - struct e1000_mac_operations ops;
32915 + e1000_mac_operations_no_const ops;
32916 u8 addr[6];
32917 u8 perm_addr[6];
32918
32919 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
32920 s32 (*check_for_ack)(struct e1000_hw *);
32921 s32 (*check_for_rst)(struct e1000_hw *);
32922 };
32923 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32924
32925 struct e1000_mbx_stats {
32926 u32 msgs_tx;
32927 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
32928 };
32929
32930 struct e1000_mbx_info {
32931 - struct e1000_mbx_operations ops;
32932 + e1000_mbx_operations_no_const ops;
32933 struct e1000_mbx_stats stats;
32934 u32 timeout;
32935 u32 usec_delay;
32936 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32937 index 6c5cca8..de8ef63 100644
32938 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32939 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32940 @@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
32941 s32 (*update_checksum)(struct ixgbe_hw *);
32942 u16 (*calc_checksum)(struct ixgbe_hw *);
32943 };
32944 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
32945
32946 struct ixgbe_mac_operations {
32947 s32 (*init_hw)(struct ixgbe_hw *);
32948 @@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
32949 /* Manageability interface */
32950 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
32951 };
32952 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32953
32954 struct ixgbe_phy_operations {
32955 s32 (*identify)(struct ixgbe_hw *);
32956 @@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
32957 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
32958 s32 (*check_overtemp)(struct ixgbe_hw *);
32959 };
32960 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
32961
32962 struct ixgbe_eeprom_info {
32963 - struct ixgbe_eeprom_operations ops;
32964 + ixgbe_eeprom_operations_no_const ops;
32965 enum ixgbe_eeprom_type type;
32966 u32 semaphore_delay;
32967 u16 word_size;
32968 @@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
32969
32970 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
32971 struct ixgbe_mac_info {
32972 - struct ixgbe_mac_operations ops;
32973 + ixgbe_mac_operations_no_const ops;
32974 enum ixgbe_mac_type type;
32975 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32976 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32977 @@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
32978 };
32979
32980 struct ixgbe_phy_info {
32981 - struct ixgbe_phy_operations ops;
32982 + ixgbe_phy_operations_no_const ops;
32983 struct mdio_if_info mdio;
32984 enum ixgbe_phy_type type;
32985 u32 id;
32986 @@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
32987 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
32988 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
32989 };
32990 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32991
32992 struct ixgbe_mbx_stats {
32993 u32 msgs_tx;
32994 @@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
32995 };
32996
32997 struct ixgbe_mbx_info {
32998 - struct ixgbe_mbx_operations ops;
32999 + ixgbe_mbx_operations_no_const ops;
33000 struct ixgbe_mbx_stats stats;
33001 u32 timeout;
33002 u32 usec_delay;
33003 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
33004 index 10306b4..28df758 100644
33005 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
33006 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
33007 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
33008 s32 (*clear_vfta)(struct ixgbe_hw *);
33009 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
33010 };
33011 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33012
33013 enum ixgbe_mac_type {
33014 ixgbe_mac_unknown = 0,
33015 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
33016 };
33017
33018 struct ixgbe_mac_info {
33019 - struct ixgbe_mac_operations ops;
33020 + ixgbe_mac_operations_no_const ops;
33021 u8 addr[6];
33022 u8 perm_addr[6];
33023
33024 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
33025 s32 (*check_for_ack)(struct ixgbe_hw *);
33026 s32 (*check_for_rst)(struct ixgbe_hw *);
33027 };
33028 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
33029
33030 struct ixgbe_mbx_stats {
33031 u32 msgs_tx;
33032 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
33033 };
33034
33035 struct ixgbe_mbx_info {
33036 - struct ixgbe_mbx_operations ops;
33037 + ixgbe_mbx_operations_no_const ops;
33038 struct ixgbe_mbx_stats stats;
33039 u32 timeout;
33040 u32 udelay;
33041 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
33042 index 94bbc85..78c12e6 100644
33043 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
33044 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
33045 @@ -40,6 +40,7 @@
33046 #include <linux/dma-mapping.h>
33047 #include <linux/slab.h>
33048 #include <linux/io-mapping.h>
33049 +#include <linux/sched.h>
33050
33051 #include <linux/mlx4/device.h>
33052 #include <linux/mlx4/doorbell.h>
33053 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
33054 index 5046a64..71ca936 100644
33055 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
33056 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
33057 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
33058 void (*link_down)(struct __vxge_hw_device *devh);
33059 void (*crit_err)(struct __vxge_hw_device *devh,
33060 enum vxge_hw_event type, u64 ext_data);
33061 -};
33062 +} __no_const;
33063
33064 /*
33065 * struct __vxge_hw_blockpool_entry - Block private data structure
33066 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
33067 index 4a518a3..936b334 100644
33068 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
33069 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
33070 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
33071 struct vxge_hw_mempool_dma *dma_object,
33072 u32 index,
33073 u32 is_last);
33074 -};
33075 +} __no_const;
33076
33077 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
33078 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
33079 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
33080 index c8f47f1..5da9840 100644
33081 --- a/drivers/net/ethernet/realtek/r8169.c
33082 +++ b/drivers/net/ethernet/realtek/r8169.c
33083 @@ -698,17 +698,17 @@ struct rtl8169_private {
33084 struct mdio_ops {
33085 void (*write)(void __iomem *, int, int);
33086 int (*read)(void __iomem *, int);
33087 - } mdio_ops;
33088 + } __no_const mdio_ops;
33089
33090 struct pll_power_ops {
33091 void (*down)(struct rtl8169_private *);
33092 void (*up)(struct rtl8169_private *);
33093 - } pll_power_ops;
33094 + } __no_const pll_power_ops;
33095
33096 struct jumbo_ops {
33097 void (*enable)(struct rtl8169_private *);
33098 void (*disable)(struct rtl8169_private *);
33099 - } jumbo_ops;
33100 + } __no_const jumbo_ops;
33101
33102 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
33103 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
33104 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
33105 index 1b4658c..a30dabb 100644
33106 --- a/drivers/net/ethernet/sis/sis190.c
33107 +++ b/drivers/net/ethernet/sis/sis190.c
33108 @@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
33109 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
33110 struct net_device *dev)
33111 {
33112 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
33113 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
33114 struct sis190_private *tp = netdev_priv(dev);
33115 struct pci_dev *isa_bridge;
33116 u8 reg, tmp8;
33117 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
33118 index edfa15d..002bfa9 100644
33119 --- a/drivers/net/ppp/ppp_generic.c
33120 +++ b/drivers/net/ppp/ppp_generic.c
33121 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33122 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
33123 struct ppp_stats stats;
33124 struct ppp_comp_stats cstats;
33125 - char *vers;
33126
33127 switch (cmd) {
33128 case SIOCGPPPSTATS:
33129 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33130 break;
33131
33132 case SIOCGPPPVER:
33133 - vers = PPP_VERSION;
33134 - if (copy_to_user(addr, vers, strlen(vers) + 1))
33135 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
33136 break;
33137 err = 0;
33138 break;
33139 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
33140 index 515f122..41dd273 100644
33141 --- a/drivers/net/tokenring/abyss.c
33142 +++ b/drivers/net/tokenring/abyss.c
33143 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
33144
33145 static int __init abyss_init (void)
33146 {
33147 - abyss_netdev_ops = tms380tr_netdev_ops;
33148 + pax_open_kernel();
33149 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33150
33151 - abyss_netdev_ops.ndo_open = abyss_open;
33152 - abyss_netdev_ops.ndo_stop = abyss_close;
33153 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
33154 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
33155 + pax_close_kernel();
33156
33157 return pci_register_driver(&abyss_driver);
33158 }
33159 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
33160 index 6153cfd..cf69c1c 100644
33161 --- a/drivers/net/tokenring/madgemc.c
33162 +++ b/drivers/net/tokenring/madgemc.c
33163 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
33164
33165 static int __init madgemc_init (void)
33166 {
33167 - madgemc_netdev_ops = tms380tr_netdev_ops;
33168 - madgemc_netdev_ops.ndo_open = madgemc_open;
33169 - madgemc_netdev_ops.ndo_stop = madgemc_close;
33170 + pax_open_kernel();
33171 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33172 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
33173 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
33174 + pax_close_kernel();
33175
33176 return mca_register_driver (&madgemc_driver);
33177 }
33178 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
33179 index 8d362e6..f91cc52 100644
33180 --- a/drivers/net/tokenring/proteon.c
33181 +++ b/drivers/net/tokenring/proteon.c
33182 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
33183 struct platform_device *pdev;
33184 int i, num = 0, err = 0;
33185
33186 - proteon_netdev_ops = tms380tr_netdev_ops;
33187 - proteon_netdev_ops.ndo_open = proteon_open;
33188 - proteon_netdev_ops.ndo_stop = tms380tr_close;
33189 + pax_open_kernel();
33190 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33191 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
33192 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
33193 + pax_close_kernel();
33194
33195 err = platform_driver_register(&proteon_driver);
33196 if (err)
33197 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
33198 index 46db5c5..37c1536 100644
33199 --- a/drivers/net/tokenring/skisa.c
33200 +++ b/drivers/net/tokenring/skisa.c
33201 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
33202 struct platform_device *pdev;
33203 int i, num = 0, err = 0;
33204
33205 - sk_isa_netdev_ops = tms380tr_netdev_ops;
33206 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
33207 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
33208 + pax_open_kernel();
33209 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33210 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
33211 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
33212 + pax_close_kernel();
33213
33214 err = platform_driver_register(&sk_isa_driver);
33215 if (err)
33216 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
33217 index 304fe78..db112fa 100644
33218 --- a/drivers/net/usb/hso.c
33219 +++ b/drivers/net/usb/hso.c
33220 @@ -71,7 +71,7 @@
33221 #include <asm/byteorder.h>
33222 #include <linux/serial_core.h>
33223 #include <linux/serial.h>
33224 -
33225 +#include <asm/local.h>
33226
33227 #define MOD_AUTHOR "Option Wireless"
33228 #define MOD_DESCRIPTION "USB High Speed Option driver"
33229 @@ -257,7 +257,7 @@ struct hso_serial {
33230
33231 /* from usb_serial_port */
33232 struct tty_struct *tty;
33233 - int open_count;
33234 + local_t open_count;
33235 spinlock_t serial_lock;
33236
33237 int (*write_data) (struct hso_serial *serial);
33238 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
33239 struct urb *urb;
33240
33241 urb = serial->rx_urb[0];
33242 - if (serial->open_count > 0) {
33243 + if (local_read(&serial->open_count) > 0) {
33244 count = put_rxbuf_data(urb, serial);
33245 if (count == -1)
33246 return;
33247 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
33248 DUMP1(urb->transfer_buffer, urb->actual_length);
33249
33250 /* Anyone listening? */
33251 - if (serial->open_count == 0)
33252 + if (local_read(&serial->open_count) == 0)
33253 return;
33254
33255 if (status == 0) {
33256 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33257 spin_unlock_irq(&serial->serial_lock);
33258
33259 /* check for port already opened, if not set the termios */
33260 - serial->open_count++;
33261 - if (serial->open_count == 1) {
33262 + if (local_inc_return(&serial->open_count) == 1) {
33263 serial->rx_state = RX_IDLE;
33264 /* Force default termio settings */
33265 _hso_serial_set_termios(tty, NULL);
33266 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33267 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
33268 if (result) {
33269 hso_stop_serial_device(serial->parent);
33270 - serial->open_count--;
33271 + local_dec(&serial->open_count);
33272 kref_put(&serial->parent->ref, hso_serial_ref_free);
33273 }
33274 } else {
33275 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
33276
33277 /* reset the rts and dtr */
33278 /* do the actual close */
33279 - serial->open_count--;
33280 + local_dec(&serial->open_count);
33281
33282 - if (serial->open_count <= 0) {
33283 - serial->open_count = 0;
33284 + if (local_read(&serial->open_count) <= 0) {
33285 + local_set(&serial->open_count, 0);
33286 spin_lock_irq(&serial->serial_lock);
33287 if (serial->tty == tty) {
33288 serial->tty->driver_data = NULL;
33289 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
33290
33291 /* the actual setup */
33292 spin_lock_irqsave(&serial->serial_lock, flags);
33293 - if (serial->open_count)
33294 + if (local_read(&serial->open_count))
33295 _hso_serial_set_termios(tty, old);
33296 else
33297 tty->termios = old;
33298 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
33299 D1("Pending read interrupt on port %d\n", i);
33300 spin_lock(&serial->serial_lock);
33301 if (serial->rx_state == RX_IDLE &&
33302 - serial->open_count > 0) {
33303 + local_read(&serial->open_count) > 0) {
33304 /* Setup and send a ctrl req read on
33305 * port i */
33306 if (!serial->rx_urb_filled[0]) {
33307 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
33308 /* Start all serial ports */
33309 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
33310 if (serial_table[i] && (serial_table[i]->interface == iface)) {
33311 - if (dev2ser(serial_table[i])->open_count) {
33312 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
33313 result =
33314 hso_start_serial_device(serial_table[i], GFP_NOIO);
33315 hso_kick_transmit(dev2ser(serial_table[i]));
33316 diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33317 index e662cbc..8d4a102 100644
33318 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
33319 +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33320 @@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
33321 * Return with error code if any of the queue indices
33322 * is out of range
33323 */
33324 - if (p->ring_index[i] < 0 ||
33325 - p->ring_index[i] >= adapter->num_rx_queues)
33326 + if (p->ring_index[i] >= adapter->num_rx_queues)
33327 return -EINVAL;
33328 }
33329
33330 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
33331 index 0f9ee46..e2d6e65 100644
33332 --- a/drivers/net/wireless/ath/ath.h
33333 +++ b/drivers/net/wireless/ath/ath.h
33334 @@ -119,6 +119,7 @@ struct ath_ops {
33335 void (*write_flush) (void *);
33336 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
33337 };
33338 +typedef struct ath_ops __no_const ath_ops_no_const;
33339
33340 struct ath_common;
33341 struct ath_bus_ops;
33342 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33343 index b592016..fe47870 100644
33344 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33345 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33346 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33347 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
33348 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
33349
33350 - ACCESS_ONCE(ads->ds_link) = i->link;
33351 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
33352 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
33353 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
33354
33355 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
33356 ctl6 = SM(i->keytype, AR_EncrType);
33357 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33358
33359 if ((i->is_first || i->is_last) &&
33360 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
33361 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
33362 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
33363 | set11nTries(i->rates, 1)
33364 | set11nTries(i->rates, 2)
33365 | set11nTries(i->rates, 3)
33366 | (i->dur_update ? AR_DurUpdateEna : 0)
33367 | SM(0, AR_BurstDur);
33368
33369 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
33370 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
33371 | set11nRate(i->rates, 1)
33372 | set11nRate(i->rates, 2)
33373 | set11nRate(i->rates, 3);
33374 } else {
33375 - ACCESS_ONCE(ads->ds_ctl2) = 0;
33376 - ACCESS_ONCE(ads->ds_ctl3) = 0;
33377 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
33378 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
33379 }
33380
33381 if (!i->is_first) {
33382 - ACCESS_ONCE(ads->ds_ctl0) = 0;
33383 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33384 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33385 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
33386 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33387 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33388 return;
33389 }
33390
33391 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33392 break;
33393 }
33394
33395 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33396 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33397 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33398 | SM(i->txpower, AR_XmitPower)
33399 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33400 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33401 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
33402 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
33403
33404 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33405 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33406 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33407 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33408
33409 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
33410 return;
33411
33412 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33413 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33414 | set11nPktDurRTSCTS(i->rates, 1);
33415
33416 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33417 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33418 | set11nPktDurRTSCTS(i->rates, 3);
33419
33420 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33421 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33422 | set11nRateFlags(i->rates, 1)
33423 | set11nRateFlags(i->rates, 2)
33424 | set11nRateFlags(i->rates, 3)
33425 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33426 index f5ae3c6..7936af3 100644
33427 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33428 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33429 @@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33430 (i->qcu << AR_TxQcuNum_S) | 0x17;
33431
33432 checksum += val;
33433 - ACCESS_ONCE(ads->info) = val;
33434 + ACCESS_ONCE_RW(ads->info) = val;
33435
33436 checksum += i->link;
33437 - ACCESS_ONCE(ads->link) = i->link;
33438 + ACCESS_ONCE_RW(ads->link) = i->link;
33439
33440 checksum += i->buf_addr[0];
33441 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
33442 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
33443 checksum += i->buf_addr[1];
33444 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
33445 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
33446 checksum += i->buf_addr[2];
33447 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
33448 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
33449 checksum += i->buf_addr[3];
33450 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
33451 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
33452
33453 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
33454 - ACCESS_ONCE(ads->ctl3) = val;
33455 + ACCESS_ONCE_RW(ads->ctl3) = val;
33456 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
33457 - ACCESS_ONCE(ads->ctl5) = val;
33458 + ACCESS_ONCE_RW(ads->ctl5) = val;
33459 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
33460 - ACCESS_ONCE(ads->ctl7) = val;
33461 + ACCESS_ONCE_RW(ads->ctl7) = val;
33462 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
33463 - ACCESS_ONCE(ads->ctl9) = val;
33464 + ACCESS_ONCE_RW(ads->ctl9) = val;
33465
33466 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
33467 - ACCESS_ONCE(ads->ctl10) = checksum;
33468 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
33469
33470 if (i->is_first || i->is_last) {
33471 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
33472 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
33473 | set11nTries(i->rates, 1)
33474 | set11nTries(i->rates, 2)
33475 | set11nTries(i->rates, 3)
33476 | (i->dur_update ? AR_DurUpdateEna : 0)
33477 | SM(0, AR_BurstDur);
33478
33479 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
33480 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
33481 | set11nRate(i->rates, 1)
33482 | set11nRate(i->rates, 2)
33483 | set11nRate(i->rates, 3);
33484 } else {
33485 - ACCESS_ONCE(ads->ctl13) = 0;
33486 - ACCESS_ONCE(ads->ctl14) = 0;
33487 + ACCESS_ONCE_RW(ads->ctl13) = 0;
33488 + ACCESS_ONCE_RW(ads->ctl14) = 0;
33489 }
33490
33491 ads->ctl20 = 0;
33492 @@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33493
33494 ctl17 = SM(i->keytype, AR_EncrType);
33495 if (!i->is_first) {
33496 - ACCESS_ONCE(ads->ctl11) = 0;
33497 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33498 - ACCESS_ONCE(ads->ctl15) = 0;
33499 - ACCESS_ONCE(ads->ctl16) = 0;
33500 - ACCESS_ONCE(ads->ctl17) = ctl17;
33501 - ACCESS_ONCE(ads->ctl18) = 0;
33502 - ACCESS_ONCE(ads->ctl19) = 0;
33503 + ACCESS_ONCE_RW(ads->ctl11) = 0;
33504 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33505 + ACCESS_ONCE_RW(ads->ctl15) = 0;
33506 + ACCESS_ONCE_RW(ads->ctl16) = 0;
33507 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33508 + ACCESS_ONCE_RW(ads->ctl18) = 0;
33509 + ACCESS_ONCE_RW(ads->ctl19) = 0;
33510 return;
33511 }
33512
33513 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33514 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33515 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33516 | SM(i->txpower, AR_XmitPower)
33517 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33518 @@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33519 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
33520 ctl12 |= SM(val, AR_PAPRDChainMask);
33521
33522 - ACCESS_ONCE(ads->ctl12) = ctl12;
33523 - ACCESS_ONCE(ads->ctl17) = ctl17;
33524 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
33525 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33526
33527 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33528 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33529 | set11nPktDurRTSCTS(i->rates, 1);
33530
33531 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33532 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33533 | set11nPktDurRTSCTS(i->rates, 3);
33534
33535 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
33536 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
33537 | set11nRateFlags(i->rates, 1)
33538 | set11nRateFlags(i->rates, 2)
33539 | set11nRateFlags(i->rates, 3)
33540 | SM(i->rtscts_rate, AR_RTSCTSRate);
33541
33542 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
33543 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
33544 }
33545
33546 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
33547 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
33548 index f389b3c..7359e18 100644
33549 --- a/drivers/net/wireless/ath/ath9k/hw.h
33550 +++ b/drivers/net/wireless/ath/ath9k/hw.h
33551 @@ -605,7 +605,7 @@ struct ath_hw_private_ops {
33552
33553 /* ANI */
33554 void (*ani_cache_ini_regs)(struct ath_hw *ah);
33555 -};
33556 +} __no_const;
33557
33558 /**
33559 * struct ath_hw_ops - callbacks used by hardware code and driver code
33560 @@ -635,7 +635,7 @@ struct ath_hw_ops {
33561 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
33562 struct ath_hw_antcomb_conf *antconf);
33563
33564 -};
33565 +} __no_const;
33566
33567 struct ath_nf_limits {
33568 s16 max;
33569 @@ -655,7 +655,7 @@ enum ath_cal_list {
33570 #define AH_FASTCC 0x4
33571
33572 struct ath_hw {
33573 - struct ath_ops reg_ops;
33574 + ath_ops_no_const reg_ops;
33575
33576 struct ieee80211_hw *hw;
33577 struct ath_common common;
33578 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33579 index bea8524..c677c06 100644
33580 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33581 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33582 @@ -547,7 +547,7 @@ struct phy_func_ptr {
33583 void (*carrsuppr)(struct brcms_phy *);
33584 s32 (*rxsigpwr)(struct brcms_phy *, s32);
33585 void (*detach)(struct brcms_phy *);
33586 -};
33587 +} __no_const;
33588
33589 struct brcms_phy {
33590 struct brcms_phy_pub pubpi_ro;
33591 diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33592 index 05f2ad1..ae00eea 100644
33593 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
33594 +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33595 @@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
33596 */
33597 if (iwl3945_mod_params.disable_hw_scan) {
33598 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
33599 - iwl3945_hw_ops.hw_scan = NULL;
33600 + pax_open_kernel();
33601 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
33602 + pax_close_kernel();
33603 }
33604
33605 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
33606 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
33607 index 69a77e2..552b42c 100644
33608 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
33609 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
33610 @@ -71,8 +71,8 @@ do { \
33611 } while (0)
33612
33613 #else
33614 -#define IWL_DEBUG(m, level, fmt, args...)
33615 -#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
33616 +#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
33617 +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
33618 #define iwl_print_hex_dump(m, level, p, len)
33619 #endif /* CONFIG_IWLWIFI_DEBUG */
33620
33621 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
33622 index 523ad55..f8c5dc5 100644
33623 --- a/drivers/net/wireless/mac80211_hwsim.c
33624 +++ b/drivers/net/wireless/mac80211_hwsim.c
33625 @@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
33626 return -EINVAL;
33627
33628 if (fake_hw_scan) {
33629 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33630 - mac80211_hwsim_ops.sw_scan_start = NULL;
33631 - mac80211_hwsim_ops.sw_scan_complete = NULL;
33632 + pax_open_kernel();
33633 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33634 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33635 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33636 + pax_close_kernel();
33637 }
33638
33639 spin_lock_init(&hwsim_radio_lock);
33640 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
33641 index 30f138b..c904585 100644
33642 --- a/drivers/net/wireless/mwifiex/main.h
33643 +++ b/drivers/net/wireless/mwifiex/main.h
33644 @@ -543,7 +543,7 @@ struct mwifiex_if_ops {
33645 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33646 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
33647 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
33648 -};
33649 +} __no_const;
33650
33651 struct mwifiex_adapter {
33652 u8 iface_type;
33653 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
33654 index 0c13840..a5c3ed6 100644
33655 --- a/drivers/net/wireless/rndis_wlan.c
33656 +++ b/drivers/net/wireless/rndis_wlan.c
33657 @@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
33658
33659 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33660
33661 - if (rts_threshold < 0 || rts_threshold > 2347)
33662 + if (rts_threshold > 2347)
33663 rts_threshold = 2347;
33664
33665 tmp = cpu_to_le32(rts_threshold);
33666 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
33667 index a77f1bb..c608b2b 100644
33668 --- a/drivers/net/wireless/wl1251/wl1251.h
33669 +++ b/drivers/net/wireless/wl1251/wl1251.h
33670 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
33671 void (*reset)(struct wl1251 *wl);
33672 void (*enable_irq)(struct wl1251 *wl);
33673 void (*disable_irq)(struct wl1251 *wl);
33674 -};
33675 +} __no_const;
33676
33677 struct wl1251 {
33678 struct ieee80211_hw *hw;
33679 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
33680 index f34b5b2..b5abb9f 100644
33681 --- a/drivers/oprofile/buffer_sync.c
33682 +++ b/drivers/oprofile/buffer_sync.c
33683 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
33684 if (cookie == NO_COOKIE)
33685 offset = pc;
33686 if (cookie == INVALID_COOKIE) {
33687 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33688 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33689 offset = pc;
33690 }
33691 if (cookie != last_cookie) {
33692 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
33693 /* add userspace sample */
33694
33695 if (!mm) {
33696 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
33697 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33698 return 0;
33699 }
33700
33701 cookie = lookup_dcookie(mm, s->eip, &offset);
33702
33703 if (cookie == INVALID_COOKIE) {
33704 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33705 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33706 return 0;
33707 }
33708
33709 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33710 /* ignore backtraces if failed to add a sample */
33711 if (state == sb_bt_start) {
33712 state = sb_bt_ignore;
33713 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33714 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33715 }
33716 }
33717 release_mm(mm);
33718 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
33719 index c0cc4e7..44d4e54 100644
33720 --- a/drivers/oprofile/event_buffer.c
33721 +++ b/drivers/oprofile/event_buffer.c
33722 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
33723 }
33724
33725 if (buffer_pos == buffer_size) {
33726 - atomic_inc(&oprofile_stats.event_lost_overflow);
33727 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33728 return;
33729 }
33730
33731 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
33732 index f8c752e..28bf4fc 100644
33733 --- a/drivers/oprofile/oprof.c
33734 +++ b/drivers/oprofile/oprof.c
33735 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
33736 if (oprofile_ops.switch_events())
33737 return;
33738
33739 - atomic_inc(&oprofile_stats.multiplex_counter);
33740 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33741 start_switch_worker();
33742 }
33743
33744 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
33745 index 917d28e..d62d981 100644
33746 --- a/drivers/oprofile/oprofile_stats.c
33747 +++ b/drivers/oprofile/oprofile_stats.c
33748 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33749 cpu_buf->sample_invalid_eip = 0;
33750 }
33751
33752 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33753 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33754 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
33755 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33756 - atomic_set(&oprofile_stats.multiplex_counter, 0);
33757 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33758 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33759 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33760 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33761 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33762 }
33763
33764
33765 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
33766 index 38b6fc0..b5cbfce 100644
33767 --- a/drivers/oprofile/oprofile_stats.h
33768 +++ b/drivers/oprofile/oprofile_stats.h
33769 @@ -13,11 +13,11 @@
33770 #include <linux/atomic.h>
33771
33772 struct oprofile_stat_struct {
33773 - atomic_t sample_lost_no_mm;
33774 - atomic_t sample_lost_no_mapping;
33775 - atomic_t bt_lost_no_mapping;
33776 - atomic_t event_lost_overflow;
33777 - atomic_t multiplex_counter;
33778 + atomic_unchecked_t sample_lost_no_mm;
33779 + atomic_unchecked_t sample_lost_no_mapping;
33780 + atomic_unchecked_t bt_lost_no_mapping;
33781 + atomic_unchecked_t event_lost_overflow;
33782 + atomic_unchecked_t multiplex_counter;
33783 };
33784
33785 extern struct oprofile_stat_struct oprofile_stats;
33786 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
33787 index 2f0aa0f..90fab02 100644
33788 --- a/drivers/oprofile/oprofilefs.c
33789 +++ b/drivers/oprofile/oprofilefs.c
33790 @@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
33791
33792
33793 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33794 - char const *name, atomic_t *val)
33795 + char const *name, atomic_unchecked_t *val)
33796 {
33797 return __oprofilefs_create_file(sb, root, name,
33798 &atomic_ro_fops, 0444, val);
33799 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
33800 index 3f56bc0..707d642 100644
33801 --- a/drivers/parport/procfs.c
33802 +++ b/drivers/parport/procfs.c
33803 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
33804
33805 *ppos += len;
33806
33807 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33808 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33809 }
33810
33811 #ifdef CONFIG_PARPORT_1284
33812 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
33813
33814 *ppos += len;
33815
33816 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33817 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33818 }
33819 #endif /* IEEE1284.3 support. */
33820
33821 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
33822 index 9fff878..ad0ad53 100644
33823 --- a/drivers/pci/hotplug/cpci_hotplug.h
33824 +++ b/drivers/pci/hotplug/cpci_hotplug.h
33825 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33826 int (*hardware_test) (struct slot* slot, u32 value);
33827 u8 (*get_power) (struct slot* slot);
33828 int (*set_power) (struct slot* slot, int value);
33829 -};
33830 +} __no_const;
33831
33832 struct cpci_hp_controller {
33833 unsigned int irq;
33834 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
33835 index 76ba8a1..20ca857 100644
33836 --- a/drivers/pci/hotplug/cpqphp_nvram.c
33837 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
33838 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
33839
33840 void compaq_nvram_init (void __iomem *rom_start)
33841 {
33842 +
33843 +#ifndef CONFIG_PAX_KERNEXEC
33844 if (rom_start) {
33845 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33846 }
33847 +#endif
33848 +
33849 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33850
33851 /* initialize our int15 lock */
33852 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
33853 index 1cfbf22..be96487 100644
33854 --- a/drivers/pci/pcie/aspm.c
33855 +++ b/drivers/pci/pcie/aspm.c
33856 @@ -27,9 +27,9 @@
33857 #define MODULE_PARAM_PREFIX "pcie_aspm."
33858
33859 /* Note: those are not register definitions */
33860 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33861 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33862 -#define ASPM_STATE_L1 (4) /* L1 state */
33863 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33864 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33865 +#define ASPM_STATE_L1 (4U) /* L1 state */
33866 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33867 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33868
33869 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
33870 index 04e74f4..a960176 100644
33871 --- a/drivers/pci/probe.c
33872 +++ b/drivers/pci/probe.c
33873 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
33874 u32 l, sz, mask;
33875 u16 orig_cmd;
33876
33877 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33878 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33879
33880 if (!dev->mmio_always_on) {
33881 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33882 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
33883 index 27911b5..5b6db88 100644
33884 --- a/drivers/pci/proc.c
33885 +++ b/drivers/pci/proc.c
33886 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
33887 static int __init pci_proc_init(void)
33888 {
33889 struct pci_dev *dev = NULL;
33890 +
33891 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33892 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33893 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33894 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33895 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33896 +#endif
33897 +#else
33898 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33899 +#endif
33900 proc_create("devices", 0, proc_bus_pci_dir,
33901 &proc_bus_pci_dev_operations);
33902 proc_initialized = 1;
33903 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
33904 index 7b82868..b9344c9 100644
33905 --- a/drivers/platform/x86/thinkpad_acpi.c
33906 +++ b/drivers/platform/x86/thinkpad_acpi.c
33907 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33908 return 0;
33909 }
33910
33911 -void static hotkey_mask_warn_incomplete_mask(void)
33912 +static void hotkey_mask_warn_incomplete_mask(void)
33913 {
33914 /* log only what the user can fix... */
33915 const u32 wantedmask = hotkey_driver_mask &
33916 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
33917 }
33918 }
33919
33920 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33921 - struct tp_nvram_state *newn,
33922 - const u32 event_mask)
33923 -{
33924 -
33925 #define TPACPI_COMPARE_KEY(__scancode, __member) \
33926 do { \
33927 if ((event_mask & (1 << __scancode)) && \
33928 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33929 tpacpi_hotkey_send_key(__scancode); \
33930 } while (0)
33931
33932 - void issue_volchange(const unsigned int oldvol,
33933 - const unsigned int newvol)
33934 - {
33935 - unsigned int i = oldvol;
33936 +static void issue_volchange(const unsigned int oldvol,
33937 + const unsigned int newvol,
33938 + const u32 event_mask)
33939 +{
33940 + unsigned int i = oldvol;
33941
33942 - while (i > newvol) {
33943 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33944 - i--;
33945 - }
33946 - while (i < newvol) {
33947 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33948 - i++;
33949 - }
33950 + while (i > newvol) {
33951 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33952 + i--;
33953 }
33954 + while (i < newvol) {
33955 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33956 + i++;
33957 + }
33958 +}
33959
33960 - void issue_brightnesschange(const unsigned int oldbrt,
33961 - const unsigned int newbrt)
33962 - {
33963 - unsigned int i = oldbrt;
33964 +static void issue_brightnesschange(const unsigned int oldbrt,
33965 + const unsigned int newbrt,
33966 + const u32 event_mask)
33967 +{
33968 + unsigned int i = oldbrt;
33969
33970 - while (i > newbrt) {
33971 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33972 - i--;
33973 - }
33974 - while (i < newbrt) {
33975 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33976 - i++;
33977 - }
33978 + while (i > newbrt) {
33979 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33980 + i--;
33981 + }
33982 + while (i < newbrt) {
33983 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33984 + i++;
33985 }
33986 +}
33987
33988 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33989 + struct tp_nvram_state *newn,
33990 + const u32 event_mask)
33991 +{
33992 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
33993 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
33994 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
33995 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33996 oldn->volume_level != newn->volume_level) {
33997 /* recently muted, or repeated mute keypress, or
33998 * multiple presses ending in mute */
33999 - issue_volchange(oldn->volume_level, newn->volume_level);
34000 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
34001 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
34002 }
34003 } else {
34004 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
34005 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
34006 }
34007 if (oldn->volume_level != newn->volume_level) {
34008 - issue_volchange(oldn->volume_level, newn->volume_level);
34009 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
34010 } else if (oldn->volume_toggle != newn->volume_toggle) {
34011 /* repeated vol up/down keypress at end of scale ? */
34012 if (newn->volume_level == 0)
34013 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
34014 /* handle brightness */
34015 if (oldn->brightness_level != newn->brightness_level) {
34016 issue_brightnesschange(oldn->brightness_level,
34017 - newn->brightness_level);
34018 + newn->brightness_level,
34019 + event_mask);
34020 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
34021 /* repeated key presses that didn't change state */
34022 if (newn->brightness_level == 0)
34023 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
34024 && !tp_features.bright_unkfw)
34025 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
34026 }
34027 +}
34028
34029 #undef TPACPI_COMPARE_KEY
34030 #undef TPACPI_MAY_SEND_KEY
34031 -}
34032
34033 /*
34034 * Polling driver
34035 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
34036 index b859d16..5cc6b1a 100644
34037 --- a/drivers/pnp/pnpbios/bioscalls.c
34038 +++ b/drivers/pnp/pnpbios/bioscalls.c
34039 @@ -59,7 +59,7 @@ do { \
34040 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
34041 } while(0)
34042
34043 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
34044 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
34045 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
34046
34047 /*
34048 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
34049
34050 cpu = get_cpu();
34051 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
34052 +
34053 + pax_open_kernel();
34054 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
34055 + pax_close_kernel();
34056
34057 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
34058 spin_lock_irqsave(&pnp_bios_lock, flags);
34059 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
34060 :"memory");
34061 spin_unlock_irqrestore(&pnp_bios_lock, flags);
34062
34063 + pax_open_kernel();
34064 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
34065 + pax_close_kernel();
34066 +
34067 put_cpu();
34068
34069 /* If we get here and this is set then the PnP BIOS faulted on us. */
34070 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
34071 return status;
34072 }
34073
34074 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
34075 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
34076 {
34077 int i;
34078
34079 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
34080 pnp_bios_callpoint.offset = header->fields.pm16offset;
34081 pnp_bios_callpoint.segment = PNP_CS16;
34082
34083 + pax_open_kernel();
34084 +
34085 for_each_possible_cpu(i) {
34086 struct desc_struct *gdt = get_cpu_gdt_table(i);
34087 if (!gdt)
34088 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
34089 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
34090 (unsigned long)__va(header->fields.pm16dseg));
34091 }
34092 +
34093 + pax_close_kernel();
34094 }
34095 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
34096 index b0ecacb..7c9da2e 100644
34097 --- a/drivers/pnp/resource.c
34098 +++ b/drivers/pnp/resource.c
34099 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
34100 return 1;
34101
34102 /* check if the resource is valid */
34103 - if (*irq < 0 || *irq > 15)
34104 + if (*irq > 15)
34105 return 0;
34106
34107 /* check if the resource is reserved */
34108 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
34109 return 1;
34110
34111 /* check if the resource is valid */
34112 - if (*dma < 0 || *dma == 4 || *dma > 7)
34113 + if (*dma == 4 || *dma > 7)
34114 return 0;
34115
34116 /* check if the resource is reserved */
34117 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
34118 index bb16f5b..c751eef 100644
34119 --- a/drivers/power/bq27x00_battery.c
34120 +++ b/drivers/power/bq27x00_battery.c
34121 @@ -67,7 +67,7 @@
34122 struct bq27x00_device_info;
34123 struct bq27x00_access_methods {
34124 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
34125 -};
34126 +} __no_const;
34127
34128 enum bq27x00_chip { BQ27000, BQ27500 };
34129
34130 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
34131 index 33f5d9a..d957d3f 100644
34132 --- a/drivers/regulator/max8660.c
34133 +++ b/drivers/regulator/max8660.c
34134 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
34135 max8660->shadow_regs[MAX8660_OVER1] = 5;
34136 } else {
34137 /* Otherwise devices can be toggled via software */
34138 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
34139 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
34140 + pax_open_kernel();
34141 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
34142 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
34143 + pax_close_kernel();
34144 }
34145
34146 /*
34147 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
34148 index 023d17d..74ef35b 100644
34149 --- a/drivers/regulator/mc13892-regulator.c
34150 +++ b/drivers/regulator/mc13892-regulator.c
34151 @@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
34152 }
34153 mc13xxx_unlock(mc13892);
34154
34155 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
34156 + pax_open_kernel();
34157 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
34158 = mc13892_vcam_set_mode;
34159 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
34160 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
34161 = mc13892_vcam_get_mode;
34162 + pax_close_kernel();
34163 for (i = 0; i < pdata->num_regulators; i++) {
34164 init_data = &pdata->regulators[i];
34165 priv->regulators[i] = regulator_register(
34166 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
34167 index cace6d3..f623fda 100644
34168 --- a/drivers/rtc/rtc-dev.c
34169 +++ b/drivers/rtc/rtc-dev.c
34170 @@ -14,6 +14,7 @@
34171 #include <linux/module.h>
34172 #include <linux/rtc.h>
34173 #include <linux/sched.h>
34174 +#include <linux/grsecurity.h>
34175 #include "rtc-core.h"
34176
34177 static dev_t rtc_devt;
34178 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
34179 if (copy_from_user(&tm, uarg, sizeof(tm)))
34180 return -EFAULT;
34181
34182 + gr_log_timechange();
34183 +
34184 return rtc_set_time(rtc, &tm);
34185
34186 case RTC_PIE_ON:
34187 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
34188 index ffb5878..e6d785c 100644
34189 --- a/drivers/scsi/aacraid/aacraid.h
34190 +++ b/drivers/scsi/aacraid/aacraid.h
34191 @@ -492,7 +492,7 @@ struct adapter_ops
34192 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
34193 /* Administrative operations */
34194 int (*adapter_comm)(struct aac_dev * dev, int comm);
34195 -};
34196 +} __no_const;
34197
34198 /*
34199 * Define which interrupt handler needs to be installed
34200 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
34201 index 705e13e..91c873c 100644
34202 --- a/drivers/scsi/aacraid/linit.c
34203 +++ b/drivers/scsi/aacraid/linit.c
34204 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
34205 #elif defined(__devinitconst)
34206 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
34207 #else
34208 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
34209 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
34210 #endif
34211 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
34212 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
34213 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
34214 index d5ff142..49c0ebb 100644
34215 --- a/drivers/scsi/aic94xx/aic94xx_init.c
34216 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
34217 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
34218 .lldd_control_phy = asd_control_phy,
34219 };
34220
34221 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
34222 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
34223 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
34224 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
34225 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
34226 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
34227 index a796de9..1ef20e1 100644
34228 --- a/drivers/scsi/bfa/bfa.h
34229 +++ b/drivers/scsi/bfa/bfa.h
34230 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
34231 u32 *end);
34232 int cpe_vec_q0;
34233 int rme_vec_q0;
34234 -};
34235 +} __no_const;
34236 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
34237
34238 struct bfa_faa_cbfn_s {
34239 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
34240 index e07bd47..cd1bbbb 100644
34241 --- a/drivers/scsi/bfa/bfa_fcpim.c
34242 +++ b/drivers/scsi/bfa/bfa_fcpim.c
34243 @@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
34244
34245 bfa_iotag_attach(fcp);
34246
34247 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
34248 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
34249 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
34250 (fcp->num_itns * sizeof(struct bfa_itn_s));
34251 memset(fcp->itn_arr, 0,
34252 @@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34253 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
34254 {
34255 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
34256 - struct bfa_itn_s *itn;
34257 + bfa_itn_s_no_const *itn;
34258
34259 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
34260 itn->isr = isr;
34261 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
34262 index 1080bcb..a3b39e3 100644
34263 --- a/drivers/scsi/bfa/bfa_fcpim.h
34264 +++ b/drivers/scsi/bfa/bfa_fcpim.h
34265 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
34266 struct bfa_itn_s {
34267 bfa_isr_func_t isr;
34268 };
34269 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
34270
34271 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34272 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
34273 @@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
34274 struct list_head iotag_tio_free_q; /* free IO resources */
34275 struct list_head iotag_unused_q; /* unused IO resources*/
34276 struct bfa_iotag_s *iotag_arr;
34277 - struct bfa_itn_s *itn_arr;
34278 + bfa_itn_s_no_const *itn_arr;
34279 int num_ioim_reqs;
34280 int num_fwtio_reqs;
34281 int num_itns;
34282 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
34283 index 546d46b..642fa5b 100644
34284 --- a/drivers/scsi/bfa/bfa_ioc.h
34285 +++ b/drivers/scsi/bfa/bfa_ioc.h
34286 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
34287 bfa_ioc_disable_cbfn_t disable_cbfn;
34288 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
34289 bfa_ioc_reset_cbfn_t reset_cbfn;
34290 -};
34291 +} __no_const;
34292
34293 /*
34294 * IOC event notification mechanism.
34295 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
34296 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
34297 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
34298 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
34299 -};
34300 +} __no_const;
34301
34302 /*
34303 * Queue element to wait for room in request queue. FIFO order is
34304 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
34305 index 351dc0b..951dc32 100644
34306 --- a/drivers/scsi/hosts.c
34307 +++ b/drivers/scsi/hosts.c
34308 @@ -42,7 +42,7 @@
34309 #include "scsi_logging.h"
34310
34311
34312 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
34313 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34314
34315
34316 static void scsi_host_cls_release(struct device *dev)
34317 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
34318 * subtract one because we increment first then return, but we need to
34319 * know what the next host number was before increment
34320 */
34321 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34322 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34323 shost->dma_channel = 0xff;
34324
34325 /* These three are default values which can be overridden */
34326 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
34327 index 865d452..e9b7fa7 100644
34328 --- a/drivers/scsi/hpsa.c
34329 +++ b/drivers/scsi/hpsa.c
34330 @@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
34331 u32 a;
34332
34333 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
34334 - return h->access.command_completed(h);
34335 + return h->access->command_completed(h);
34336
34337 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
34338 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
34339 @@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h)
34340 while (!list_empty(&h->reqQ)) {
34341 c = list_entry(h->reqQ.next, struct CommandList, list);
34342 /* can't do anything if fifo is full */
34343 - if ((h->access.fifo_full(h))) {
34344 + if ((h->access->fifo_full(h))) {
34345 dev_warn(&h->pdev->dev, "fifo full\n");
34346 break;
34347 }
34348 @@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h)
34349 h->Qdepth--;
34350
34351 /* Tell the controller execute command */
34352 - h->access.submit_command(h, c);
34353 + h->access->submit_command(h, c);
34354
34355 /* Put job onto the completed Q */
34356 addQ(&h->cmpQ, c);
34357 @@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h)
34358
34359 static inline unsigned long get_next_completion(struct ctlr_info *h)
34360 {
34361 - return h->access.command_completed(h);
34362 + return h->access->command_completed(h);
34363 }
34364
34365 static inline bool interrupt_pending(struct ctlr_info *h)
34366 {
34367 - return h->access.intr_pending(h);
34368 + return h->access->intr_pending(h);
34369 }
34370
34371 static inline long interrupt_not_for_us(struct ctlr_info *h)
34372 {
34373 - return (h->access.intr_pending(h) == 0) ||
34374 + return (h->access->intr_pending(h) == 0) ||
34375 (h->interrupts_enabled == 0);
34376 }
34377
34378 @@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
34379 if (prod_index < 0)
34380 return -ENODEV;
34381 h->product_name = products[prod_index].product_name;
34382 - h->access = *(products[prod_index].access);
34383 + h->access = products[prod_index].access;
34384
34385 if (hpsa_board_disabled(h->pdev)) {
34386 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
34387 @@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
34388
34389 assert_spin_locked(&lockup_detector_lock);
34390 remove_ctlr_from_lockup_detector_list(h);
34391 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34392 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34393 spin_lock_irqsave(&h->lock, flags);
34394 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
34395 spin_unlock_irqrestore(&h->lock, flags);
34396 @@ -4340,7 +4340,7 @@ reinit_after_soft_reset:
34397 }
34398
34399 /* make sure the board interrupts are off */
34400 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34401 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34402
34403 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
34404 goto clean2;
34405 @@ -4374,7 +4374,7 @@ reinit_after_soft_reset:
34406 * fake ones to scoop up any residual completions.
34407 */
34408 spin_lock_irqsave(&h->lock, flags);
34409 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34410 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34411 spin_unlock_irqrestore(&h->lock, flags);
34412 free_irq(h->intr[h->intr_mode], h);
34413 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
34414 @@ -4393,9 +4393,9 @@ reinit_after_soft_reset:
34415 dev_info(&h->pdev->dev, "Board READY.\n");
34416 dev_info(&h->pdev->dev,
34417 "Waiting for stale completions to drain.\n");
34418 - h->access.set_intr_mask(h, HPSA_INTR_ON);
34419 + h->access->set_intr_mask(h, HPSA_INTR_ON);
34420 msleep(10000);
34421 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34422 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34423
34424 rc = controller_reset_failed(h->cfgtable);
34425 if (rc)
34426 @@ -4416,7 +4416,7 @@ reinit_after_soft_reset:
34427 }
34428
34429 /* Turn the interrupts on so we can service requests */
34430 - h->access.set_intr_mask(h, HPSA_INTR_ON);
34431 + h->access->set_intr_mask(h, HPSA_INTR_ON);
34432
34433 hpsa_hba_inquiry(h);
34434 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
34435 @@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
34436 * To write all data in the battery backed cache to disks
34437 */
34438 hpsa_flush_cache(h);
34439 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34440 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34441 free_irq(h->intr[h->intr_mode], h);
34442 #ifdef CONFIG_PCI_MSI
34443 if (h->msix_vector)
34444 @@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
34445 return;
34446 }
34447 /* Change the access methods to the performant access methods */
34448 - h->access = SA5_performant_access;
34449 + h->access = &SA5_performant_access;
34450 h->transMethod = CFGTBL_Trans_Performant;
34451 }
34452
34453 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
34454 index 91edafb..a9b88ec 100644
34455 --- a/drivers/scsi/hpsa.h
34456 +++ b/drivers/scsi/hpsa.h
34457 @@ -73,7 +73,7 @@ struct ctlr_info {
34458 unsigned int msix_vector;
34459 unsigned int msi_vector;
34460 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
34461 - struct access_method access;
34462 + struct access_method *access;
34463
34464 /* queue and queue Info */
34465 struct list_head reqQ;
34466 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
34467 index f2df059..a3a9930 100644
34468 --- a/drivers/scsi/ips.h
34469 +++ b/drivers/scsi/ips.h
34470 @@ -1027,7 +1027,7 @@ typedef struct {
34471 int (*intr)(struct ips_ha *);
34472 void (*enableint)(struct ips_ha *);
34473 uint32_t (*statupd)(struct ips_ha *);
34474 -} ips_hw_func_t;
34475 +} __no_const ips_hw_func_t;
34476
34477 typedef struct ips_ha {
34478 uint8_t ha_id[IPS_MAX_CHANNELS+1];
34479 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
34480 index 9de9db2..1e09660 100644
34481 --- a/drivers/scsi/libfc/fc_exch.c
34482 +++ b/drivers/scsi/libfc/fc_exch.c
34483 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
34484 * all together if not used XXX
34485 */
34486 struct {
34487 - atomic_t no_free_exch;
34488 - atomic_t no_free_exch_xid;
34489 - atomic_t xid_not_found;
34490 - atomic_t xid_busy;
34491 - atomic_t seq_not_found;
34492 - atomic_t non_bls_resp;
34493 + atomic_unchecked_t no_free_exch;
34494 + atomic_unchecked_t no_free_exch_xid;
34495 + atomic_unchecked_t xid_not_found;
34496 + atomic_unchecked_t xid_busy;
34497 + atomic_unchecked_t seq_not_found;
34498 + atomic_unchecked_t non_bls_resp;
34499 } stats;
34500 };
34501
34502 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
34503 /* allocate memory for exchange */
34504 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34505 if (!ep) {
34506 - atomic_inc(&mp->stats.no_free_exch);
34507 + atomic_inc_unchecked(&mp->stats.no_free_exch);
34508 goto out;
34509 }
34510 memset(ep, 0, sizeof(*ep));
34511 @@ -780,7 +780,7 @@ out:
34512 return ep;
34513 err:
34514 spin_unlock_bh(&pool->lock);
34515 - atomic_inc(&mp->stats.no_free_exch_xid);
34516 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34517 mempool_free(ep, mp->ep_pool);
34518 return NULL;
34519 }
34520 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34521 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34522 ep = fc_exch_find(mp, xid);
34523 if (!ep) {
34524 - atomic_inc(&mp->stats.xid_not_found);
34525 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34526 reject = FC_RJT_OX_ID;
34527 goto out;
34528 }
34529 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34530 ep = fc_exch_find(mp, xid);
34531 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34532 if (ep) {
34533 - atomic_inc(&mp->stats.xid_busy);
34534 + atomic_inc_unchecked(&mp->stats.xid_busy);
34535 reject = FC_RJT_RX_ID;
34536 goto rel;
34537 }
34538 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34539 }
34540 xid = ep->xid; /* get our XID */
34541 } else if (!ep) {
34542 - atomic_inc(&mp->stats.xid_not_found);
34543 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34544 reject = FC_RJT_RX_ID; /* XID not found */
34545 goto out;
34546 }
34547 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34548 } else {
34549 sp = &ep->seq;
34550 if (sp->id != fh->fh_seq_id) {
34551 - atomic_inc(&mp->stats.seq_not_found);
34552 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34553 if (f_ctl & FC_FC_END_SEQ) {
34554 /*
34555 * Update sequence_id based on incoming last
34556 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34557
34558 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34559 if (!ep) {
34560 - atomic_inc(&mp->stats.xid_not_found);
34561 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34562 goto out;
34563 }
34564 if (ep->esb_stat & ESB_ST_COMPLETE) {
34565 - atomic_inc(&mp->stats.xid_not_found);
34566 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34567 goto rel;
34568 }
34569 if (ep->rxid == FC_XID_UNKNOWN)
34570 ep->rxid = ntohs(fh->fh_rx_id);
34571 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34572 - atomic_inc(&mp->stats.xid_not_found);
34573 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34574 goto rel;
34575 }
34576 if (ep->did != ntoh24(fh->fh_s_id) &&
34577 ep->did != FC_FID_FLOGI) {
34578 - atomic_inc(&mp->stats.xid_not_found);
34579 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34580 goto rel;
34581 }
34582 sof = fr_sof(fp);
34583 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34584 sp->ssb_stat |= SSB_ST_RESP;
34585 sp->id = fh->fh_seq_id;
34586 } else if (sp->id != fh->fh_seq_id) {
34587 - atomic_inc(&mp->stats.seq_not_found);
34588 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34589 goto rel;
34590 }
34591
34592 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34593 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34594
34595 if (!sp)
34596 - atomic_inc(&mp->stats.xid_not_found);
34597 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34598 else
34599 - atomic_inc(&mp->stats.non_bls_resp);
34600 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
34601
34602 fc_frame_free(fp);
34603 }
34604 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
34605 index db9238f..4378ed2 100644
34606 --- a/drivers/scsi/libsas/sas_ata.c
34607 +++ b/drivers/scsi/libsas/sas_ata.c
34608 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
34609 .postreset = ata_std_postreset,
34610 .error_handler = ata_std_error_handler,
34611 .post_internal_cmd = sas_ata_post_internal,
34612 - .qc_defer = ata_std_qc_defer,
34613 + .qc_defer = ata_std_qc_defer,
34614 .qc_prep = ata_noop_qc_prep,
34615 .qc_issue = sas_ata_qc_issue,
34616 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34617 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
34618 index bb4c8e0..f33d849 100644
34619 --- a/drivers/scsi/lpfc/lpfc.h
34620 +++ b/drivers/scsi/lpfc/lpfc.h
34621 @@ -425,7 +425,7 @@ struct lpfc_vport {
34622 struct dentry *debug_nodelist;
34623 struct dentry *vport_debugfs_root;
34624 struct lpfc_debugfs_trc *disc_trc;
34625 - atomic_t disc_trc_cnt;
34626 + atomic_unchecked_t disc_trc_cnt;
34627 #endif
34628 uint8_t stat_data_enabled;
34629 uint8_t stat_data_blocked;
34630 @@ -835,8 +835,8 @@ struct lpfc_hba {
34631 struct timer_list fabric_block_timer;
34632 unsigned long bit_flags;
34633 #define FABRIC_COMANDS_BLOCKED 0
34634 - atomic_t num_rsrc_err;
34635 - atomic_t num_cmd_success;
34636 + atomic_unchecked_t num_rsrc_err;
34637 + atomic_unchecked_t num_cmd_success;
34638 unsigned long last_rsrc_error_time;
34639 unsigned long last_ramp_down_time;
34640 unsigned long last_ramp_up_time;
34641 @@ -866,7 +866,7 @@ struct lpfc_hba {
34642
34643 struct dentry *debug_slow_ring_trc;
34644 struct lpfc_debugfs_trc *slow_ring_trc;
34645 - atomic_t slow_ring_trc_cnt;
34646 + atomic_unchecked_t slow_ring_trc_cnt;
34647 /* iDiag debugfs sub-directory */
34648 struct dentry *idiag_root;
34649 struct dentry *idiag_pci_cfg;
34650 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
34651 index 2838259..a07cfb5 100644
34652 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
34653 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
34654 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
34655
34656 #include <linux/debugfs.h>
34657
34658 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34659 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34660 static unsigned long lpfc_debugfs_start_time = 0L;
34661
34662 /* iDiag */
34663 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
34664 lpfc_debugfs_enable = 0;
34665
34666 len = 0;
34667 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34668 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34669 (lpfc_debugfs_max_disc_trc - 1);
34670 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34671 dtp = vport->disc_trc + i;
34672 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
34673 lpfc_debugfs_enable = 0;
34674
34675 len = 0;
34676 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34677 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34678 (lpfc_debugfs_max_slow_ring_trc - 1);
34679 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34680 dtp = phba->slow_ring_trc + i;
34681 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
34682 !vport || !vport->disc_trc)
34683 return;
34684
34685 - index = atomic_inc_return(&vport->disc_trc_cnt) &
34686 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34687 (lpfc_debugfs_max_disc_trc - 1);
34688 dtp = vport->disc_trc + index;
34689 dtp->fmt = fmt;
34690 dtp->data1 = data1;
34691 dtp->data2 = data2;
34692 dtp->data3 = data3;
34693 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34694 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34695 dtp->jif = jiffies;
34696 #endif
34697 return;
34698 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
34699 !phba || !phba->slow_ring_trc)
34700 return;
34701
34702 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34703 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34704 (lpfc_debugfs_max_slow_ring_trc - 1);
34705 dtp = phba->slow_ring_trc + index;
34706 dtp->fmt = fmt;
34707 dtp->data1 = data1;
34708 dtp->data2 = data2;
34709 dtp->data3 = data3;
34710 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34711 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34712 dtp->jif = jiffies;
34713 #endif
34714 return;
34715 @@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34716 "slow_ring buffer\n");
34717 goto debug_failed;
34718 }
34719 - atomic_set(&phba->slow_ring_trc_cnt, 0);
34720 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34721 memset(phba->slow_ring_trc, 0,
34722 (sizeof(struct lpfc_debugfs_trc) *
34723 lpfc_debugfs_max_slow_ring_trc));
34724 @@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34725 "buffer\n");
34726 goto debug_failed;
34727 }
34728 - atomic_set(&vport->disc_trc_cnt, 0);
34729 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34730
34731 snprintf(name, sizeof(name), "discovery_trace");
34732 vport->debug_disc_trc =
34733 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
34734 index 55bc4fc..a2a109c 100644
34735 --- a/drivers/scsi/lpfc/lpfc_init.c
34736 +++ b/drivers/scsi/lpfc/lpfc_init.c
34737 @@ -10027,8 +10027,10 @@ lpfc_init(void)
34738 printk(LPFC_COPYRIGHT "\n");
34739
34740 if (lpfc_enable_npiv) {
34741 - lpfc_transport_functions.vport_create = lpfc_vport_create;
34742 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34743 + pax_open_kernel();
34744 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34745 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34746 + pax_close_kernel();
34747 }
34748 lpfc_transport_template =
34749 fc_attach_transport(&lpfc_transport_functions);
34750 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
34751 index 2e1e54e..1af0a0d 100644
34752 --- a/drivers/scsi/lpfc/lpfc_scsi.c
34753 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
34754 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
34755 uint32_t evt_posted;
34756
34757 spin_lock_irqsave(&phba->hbalock, flags);
34758 - atomic_inc(&phba->num_rsrc_err);
34759 + atomic_inc_unchecked(&phba->num_rsrc_err);
34760 phba->last_rsrc_error_time = jiffies;
34761
34762 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34763 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
34764 unsigned long flags;
34765 struct lpfc_hba *phba = vport->phba;
34766 uint32_t evt_posted;
34767 - atomic_inc(&phba->num_cmd_success);
34768 + atomic_inc_unchecked(&phba->num_cmd_success);
34769
34770 if (vport->cfg_lun_queue_depth <= queue_depth)
34771 return;
34772 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34773 unsigned long num_rsrc_err, num_cmd_success;
34774 int i;
34775
34776 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34777 - num_cmd_success = atomic_read(&phba->num_cmd_success);
34778 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34779 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34780
34781 vports = lpfc_create_vport_work_array(phba);
34782 if (vports != NULL)
34783 @@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34784 }
34785 }
34786 lpfc_destroy_vport_work_array(phba, vports);
34787 - atomic_set(&phba->num_rsrc_err, 0);
34788 - atomic_set(&phba->num_cmd_success, 0);
34789 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34790 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34791 }
34792
34793 /**
34794 @@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
34795 }
34796 }
34797 lpfc_destroy_vport_work_array(phba, vports);
34798 - atomic_set(&phba->num_rsrc_err, 0);
34799 - atomic_set(&phba->num_cmd_success, 0);
34800 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34801 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34802 }
34803
34804 /**
34805 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
34806 index 5163edb..7b142bc 100644
34807 --- a/drivers/scsi/pmcraid.c
34808 +++ b/drivers/scsi/pmcraid.c
34809 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
34810 res->scsi_dev = scsi_dev;
34811 scsi_dev->hostdata = res;
34812 res->change_detected = 0;
34813 - atomic_set(&res->read_failures, 0);
34814 - atomic_set(&res->write_failures, 0);
34815 + atomic_set_unchecked(&res->read_failures, 0);
34816 + atomic_set_unchecked(&res->write_failures, 0);
34817 rc = 0;
34818 }
34819 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34820 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
34821
34822 /* If this was a SCSI read/write command keep count of errors */
34823 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34824 - atomic_inc(&res->read_failures);
34825 + atomic_inc_unchecked(&res->read_failures);
34826 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34827 - atomic_inc(&res->write_failures);
34828 + atomic_inc_unchecked(&res->write_failures);
34829
34830 if (!RES_IS_GSCSI(res->cfg_entry) &&
34831 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34832 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
34833 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34834 * hrrq_id assigned here in queuecommand
34835 */
34836 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34837 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34838 pinstance->num_hrrq;
34839 cmd->cmd_done = pmcraid_io_done;
34840
34841 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
34842 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34843 * hrrq_id assigned here in queuecommand
34844 */
34845 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34846 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34847 pinstance->num_hrrq;
34848
34849 if (request_size) {
34850 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
34851
34852 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34853 /* add resources only after host is added into system */
34854 - if (!atomic_read(&pinstance->expose_resources))
34855 + if (!atomic_read_unchecked(&pinstance->expose_resources))
34856 return;
34857
34858 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34859 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
34860 init_waitqueue_head(&pinstance->reset_wait_q);
34861
34862 atomic_set(&pinstance->outstanding_cmds, 0);
34863 - atomic_set(&pinstance->last_message_id, 0);
34864 - atomic_set(&pinstance->expose_resources, 0);
34865 + atomic_set_unchecked(&pinstance->last_message_id, 0);
34866 + atomic_set_unchecked(&pinstance->expose_resources, 0);
34867
34868 INIT_LIST_HEAD(&pinstance->free_res_q);
34869 INIT_LIST_HEAD(&pinstance->used_res_q);
34870 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
34871 /* Schedule worker thread to handle CCN and take care of adding and
34872 * removing devices to OS
34873 */
34874 - atomic_set(&pinstance->expose_resources, 1);
34875 + atomic_set_unchecked(&pinstance->expose_resources, 1);
34876 schedule_work(&pinstance->worker_q);
34877 return rc;
34878
34879 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
34880 index ca496c7..9c791d5 100644
34881 --- a/drivers/scsi/pmcraid.h
34882 +++ b/drivers/scsi/pmcraid.h
34883 @@ -748,7 +748,7 @@ struct pmcraid_instance {
34884 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34885
34886 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34887 - atomic_t last_message_id;
34888 + atomic_unchecked_t last_message_id;
34889
34890 /* configuration table */
34891 struct pmcraid_config_table *cfg_table;
34892 @@ -777,7 +777,7 @@ struct pmcraid_instance {
34893 atomic_t outstanding_cmds;
34894
34895 /* should add/delete resources to mid-layer now ?*/
34896 - atomic_t expose_resources;
34897 + atomic_unchecked_t expose_resources;
34898
34899
34900
34901 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
34902 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34903 };
34904 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34905 - atomic_t read_failures; /* count of failed READ commands */
34906 - atomic_t write_failures; /* count of failed WRITE commands */
34907 + atomic_unchecked_t read_failures; /* count of failed READ commands */
34908 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34909
34910 /* To indicate add/delete/modify during CCN */
34911 u8 change_detected;
34912 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
34913 index fcf052c..a8025a4 100644
34914 --- a/drivers/scsi/qla2xxx/qla_def.h
34915 +++ b/drivers/scsi/qla2xxx/qla_def.h
34916 @@ -2244,7 +2244,7 @@ struct isp_operations {
34917 int (*get_flash_version) (struct scsi_qla_host *, void *);
34918 int (*start_scsi) (srb_t *);
34919 int (*abort_isp) (struct scsi_qla_host *);
34920 -};
34921 +} __no_const;
34922
34923 /* MSI-X Support *************************************************************/
34924
34925 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
34926 index fd5edc6..4906148 100644
34927 --- a/drivers/scsi/qla4xxx/ql4_def.h
34928 +++ b/drivers/scsi/qla4xxx/ql4_def.h
34929 @@ -258,7 +258,7 @@ struct ddb_entry {
34930 * (4000 only) */
34931 atomic_t relogin_timer; /* Max Time to wait for
34932 * relogin to complete */
34933 - atomic_t relogin_retry_count; /* Num of times relogin has been
34934 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34935 * retried */
34936 uint32_t default_time2wait; /* Default Min time between
34937 * relogins (+aens) */
34938 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
34939 index 4169c8b..a8b896b 100644
34940 --- a/drivers/scsi/qla4xxx/ql4_os.c
34941 +++ b/drivers/scsi/qla4xxx/ql4_os.c
34942 @@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
34943 */
34944 if (!iscsi_is_session_online(cls_sess)) {
34945 /* Reset retry relogin timer */
34946 - atomic_inc(&ddb_entry->relogin_retry_count);
34947 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34948 DEBUG2(ql4_printk(KERN_INFO, ha,
34949 "%s: index[%d] relogin timed out-retrying"
34950 " relogin (%d), retry (%d)\n", __func__,
34951 ddb_entry->fw_ddb_index,
34952 - atomic_read(&ddb_entry->relogin_retry_count),
34953 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
34954 ddb_entry->default_time2wait + 4));
34955 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
34956 atomic_set(&ddb_entry->retry_relogin_timer,
34957 @@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
34958
34959 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34960 atomic_set(&ddb_entry->relogin_timer, 0);
34961 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34962 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34963
34964 ddb_entry->default_relogin_timeout =
34965 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
34966 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
34967 index 2aeb2e9..46e3925 100644
34968 --- a/drivers/scsi/scsi.c
34969 +++ b/drivers/scsi/scsi.c
34970 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
34971 unsigned long timeout;
34972 int rtn = 0;
34973
34974 - atomic_inc(&cmd->device->iorequest_cnt);
34975 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34976
34977 /* check if the device is still usable */
34978 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34979 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
34980 index f85cfa6..a57c9e8 100644
34981 --- a/drivers/scsi/scsi_lib.c
34982 +++ b/drivers/scsi/scsi_lib.c
34983 @@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
34984 shost = sdev->host;
34985 scsi_init_cmd_errh(cmd);
34986 cmd->result = DID_NO_CONNECT << 16;
34987 - atomic_inc(&cmd->device->iorequest_cnt);
34988 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34989
34990 /*
34991 * SCSI request completion path will do scsi_device_unbusy(),
34992 @@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq)
34993
34994 INIT_LIST_HEAD(&cmd->eh_entry);
34995
34996 - atomic_inc(&cmd->device->iodone_cnt);
34997 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
34998 if (cmd->result)
34999 - atomic_inc(&cmd->device->ioerr_cnt);
35000 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
35001
35002 disposition = scsi_decide_disposition(cmd);
35003 if (disposition != SUCCESS &&
35004 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
35005 index 04c2a27..9d8bd66 100644
35006 --- a/drivers/scsi/scsi_sysfs.c
35007 +++ b/drivers/scsi/scsi_sysfs.c
35008 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
35009 char *buf) \
35010 { \
35011 struct scsi_device *sdev = to_scsi_device(dev); \
35012 - unsigned long long count = atomic_read(&sdev->field); \
35013 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
35014 return snprintf(buf, 20, "0x%llx\n", count); \
35015 } \
35016 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
35017 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
35018 index 84a1fdf..693b0d6 100644
35019 --- a/drivers/scsi/scsi_tgt_lib.c
35020 +++ b/drivers/scsi/scsi_tgt_lib.c
35021 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
35022 int err;
35023
35024 dprintk("%lx %u\n", uaddr, len);
35025 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
35026 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
35027 if (err) {
35028 /*
35029 * TODO: need to fixup sg_tablesize, max_segment_size,
35030 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
35031 index 1b21491..1b7f60e 100644
35032 --- a/drivers/scsi/scsi_transport_fc.c
35033 +++ b/drivers/scsi/scsi_transport_fc.c
35034 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
35035 * Netlink Infrastructure
35036 */
35037
35038 -static atomic_t fc_event_seq;
35039 +static atomic_unchecked_t fc_event_seq;
35040
35041 /**
35042 * fc_get_event_number - Obtain the next sequential FC event number
35043 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
35044 u32
35045 fc_get_event_number(void)
35046 {
35047 - return atomic_add_return(1, &fc_event_seq);
35048 + return atomic_add_return_unchecked(1, &fc_event_seq);
35049 }
35050 EXPORT_SYMBOL(fc_get_event_number);
35051
35052 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
35053 {
35054 int error;
35055
35056 - atomic_set(&fc_event_seq, 0);
35057 + atomic_set_unchecked(&fc_event_seq, 0);
35058
35059 error = transport_class_register(&fc_host_class);
35060 if (error)
35061 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
35062 char *cp;
35063
35064 *val = simple_strtoul(buf, &cp, 0);
35065 - if ((*cp && (*cp != '\n')) || (*val < 0))
35066 + if (*cp && (*cp != '\n'))
35067 return -EINVAL;
35068 /*
35069 * Check for overflow; dev_loss_tmo is u32
35070 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
35071 index 96029e6..4d77fa0 100644
35072 --- a/drivers/scsi/scsi_transport_iscsi.c
35073 +++ b/drivers/scsi/scsi_transport_iscsi.c
35074 @@ -79,7 +79,7 @@ struct iscsi_internal {
35075 struct transport_container session_cont;
35076 };
35077
35078 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
35079 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
35080 static struct workqueue_struct *iscsi_eh_timer_workq;
35081
35082 static DEFINE_IDA(iscsi_sess_ida);
35083 @@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
35084 int err;
35085
35086 ihost = shost->shost_data;
35087 - session->sid = atomic_add_return(1, &iscsi_session_nr);
35088 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
35089
35090 if (target_id == ISCSI_MAX_TARGET) {
35091 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
35092 @@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
35093 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
35094 ISCSI_TRANSPORT_VERSION);
35095
35096 - atomic_set(&iscsi_session_nr, 0);
35097 + atomic_set_unchecked(&iscsi_session_nr, 0);
35098
35099 err = class_register(&iscsi_transport_class);
35100 if (err)
35101 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
35102 index 21a045e..ec89e03 100644
35103 --- a/drivers/scsi/scsi_transport_srp.c
35104 +++ b/drivers/scsi/scsi_transport_srp.c
35105 @@ -33,7 +33,7 @@
35106 #include "scsi_transport_srp_internal.h"
35107
35108 struct srp_host_attrs {
35109 - atomic_t next_port_id;
35110 + atomic_unchecked_t next_port_id;
35111 };
35112 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
35113
35114 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
35115 struct Scsi_Host *shost = dev_to_shost(dev);
35116 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
35117
35118 - atomic_set(&srp_host->next_port_id, 0);
35119 + atomic_set_unchecked(&srp_host->next_port_id, 0);
35120 return 0;
35121 }
35122
35123 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
35124 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
35125 rport->roles = ids->roles;
35126
35127 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
35128 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
35129 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
35130
35131 transport_setup_device(&rport->dev);
35132 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
35133 index 441a1c5..07cece7 100644
35134 --- a/drivers/scsi/sg.c
35135 +++ b/drivers/scsi/sg.c
35136 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
35137 sdp->disk->disk_name,
35138 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
35139 NULL,
35140 - (char *)arg);
35141 + (char __user *)arg);
35142 case BLKTRACESTART:
35143 return blk_trace_startstop(sdp->device->request_queue, 1);
35144 case BLKTRACESTOP:
35145 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
35146 const struct file_operations * fops;
35147 };
35148
35149 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
35150 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
35151 {"allow_dio", &adio_fops},
35152 {"debug", &debug_fops},
35153 {"def_reserved_size", &dressz_fops},
35154 @@ -2327,7 +2327,7 @@ sg_proc_init(void)
35155 {
35156 int k, mask;
35157 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
35158 - struct sg_proc_leaf * leaf;
35159 + const struct sg_proc_leaf * leaf;
35160
35161 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
35162 if (!sg_proc_sgp)
35163 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
35164 index f64250e..1ee3049 100644
35165 --- a/drivers/spi/spi-dw-pci.c
35166 +++ b/drivers/spi/spi-dw-pci.c
35167 @@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
35168 #define spi_resume NULL
35169 #endif
35170
35171 -static const struct pci_device_id pci_ids[] __devinitdata = {
35172 +static const struct pci_device_id pci_ids[] __devinitconst = {
35173 /* Intel MID platform SPI controller 0 */
35174 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
35175 {},
35176 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
35177 index 77eae99..b7cdcc9 100644
35178 --- a/drivers/spi/spi.c
35179 +++ b/drivers/spi/spi.c
35180 @@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
35181 EXPORT_SYMBOL_GPL(spi_bus_unlock);
35182
35183 /* portable code must never pass more than 32 bytes */
35184 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
35185 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
35186
35187 static u8 *buf;
35188
35189 diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
35190 index 436fe97..4082570 100644
35191 --- a/drivers/staging/gma500/power.c
35192 +++ b/drivers/staging/gma500/power.c
35193 @@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
35194 ret = gma_resume_pci(dev->pdev);
35195 if (ret == 0) {
35196 /* FIXME: we want to defer this for Medfield/Oaktrail */
35197 - gma_resume_display(dev);
35198 + gma_resume_display(dev->pdev);
35199 psb_irq_preinstall(dev);
35200 psb_irq_postinstall(dev);
35201 pm_runtime_get(&dev->pdev->dev);
35202 diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
35203 index bafccb3..e3ac78d 100644
35204 --- a/drivers/staging/hv/rndis_filter.c
35205 +++ b/drivers/staging/hv/rndis_filter.c
35206 @@ -42,7 +42,7 @@ struct rndis_device {
35207
35208 enum rndis_device_state state;
35209 bool link_state;
35210 - atomic_t new_req_id;
35211 + atomic_unchecked_t new_req_id;
35212
35213 spinlock_t request_lock;
35214 struct list_head req_list;
35215 @@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35216 * template
35217 */
35218 set = &rndis_msg->msg.set_req;
35219 - set->req_id = atomic_inc_return(&dev->new_req_id);
35220 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35221
35222 /* Add to the request list */
35223 spin_lock_irqsave(&dev->request_lock, flags);
35224 @@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35225
35226 /* Setup the rndis set */
35227 halt = &request->request_msg.msg.halt_req;
35228 - halt->req_id = atomic_inc_return(&dev->new_req_id);
35229 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35230
35231 /* Ignore return since this msg is optional. */
35232 rndis_filter_send_request(dev, request);
35233 diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
35234 index 9e8f010..af9efb5 100644
35235 --- a/drivers/staging/iio/buffer_generic.h
35236 +++ b/drivers/staging/iio/buffer_generic.h
35237 @@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
35238
35239 int (*is_enabled)(struct iio_buffer *buffer);
35240 int (*enable)(struct iio_buffer *buffer);
35241 -};
35242 +} __no_const;
35243
35244 /**
35245 * struct iio_buffer_setup_ops - buffer setup related callbacks
35246 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
35247 index 8b307b4..a97ac91 100644
35248 --- a/drivers/staging/octeon/ethernet-rx.c
35249 +++ b/drivers/staging/octeon/ethernet-rx.c
35250 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35251 /* Increment RX stats for virtual ports */
35252 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35253 #ifdef CONFIG_64BIT
35254 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35255 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35256 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35257 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35258 #else
35259 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35260 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35261 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35262 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35263 #endif
35264 }
35265 netif_receive_skb(skb);
35266 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35267 dev->name);
35268 */
35269 #ifdef CONFIG_64BIT
35270 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35271 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35272 #else
35273 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35274 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35275 #endif
35276 dev_kfree_skb_irq(skb);
35277 }
35278 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
35279 index 076f866..2308070 100644
35280 --- a/drivers/staging/octeon/ethernet.c
35281 +++ b/drivers/staging/octeon/ethernet.c
35282 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
35283 * since the RX tasklet also increments it.
35284 */
35285 #ifdef CONFIG_64BIT
35286 - atomic64_add(rx_status.dropped_packets,
35287 - (atomic64_t *)&priv->stats.rx_dropped);
35288 + atomic64_add_unchecked(rx_status.dropped_packets,
35289 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35290 #else
35291 - atomic_add(rx_status.dropped_packets,
35292 - (atomic_t *)&priv->stats.rx_dropped);
35293 + atomic_add_unchecked(rx_status.dropped_packets,
35294 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
35295 #endif
35296 }
35297
35298 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
35299 index 7a19555..466456d 100644
35300 --- a/drivers/staging/pohmelfs/inode.c
35301 +++ b/drivers/staging/pohmelfs/inode.c
35302 @@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35303 mutex_init(&psb->mcache_lock);
35304 psb->mcache_root = RB_ROOT;
35305 psb->mcache_timeout = msecs_to_jiffies(5000);
35306 - atomic_long_set(&psb->mcache_gen, 0);
35307 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
35308
35309 psb->trans_max_pages = 100;
35310
35311 @@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35312 INIT_LIST_HEAD(&psb->crypto_ready_list);
35313 INIT_LIST_HEAD(&psb->crypto_active_list);
35314
35315 - atomic_set(&psb->trans_gen, 1);
35316 + atomic_set_unchecked(&psb->trans_gen, 1);
35317 atomic_long_set(&psb->total_inodes, 0);
35318
35319 mutex_init(&psb->state_lock);
35320 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
35321 index e22665c..a2a9390 100644
35322 --- a/drivers/staging/pohmelfs/mcache.c
35323 +++ b/drivers/staging/pohmelfs/mcache.c
35324 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
35325 m->data = data;
35326 m->start = start;
35327 m->size = size;
35328 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
35329 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35330
35331 mutex_lock(&psb->mcache_lock);
35332 err = pohmelfs_mcache_insert(psb, m);
35333 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
35334 index 985b6b7..7699e05 100644
35335 --- a/drivers/staging/pohmelfs/netfs.h
35336 +++ b/drivers/staging/pohmelfs/netfs.h
35337 @@ -571,14 +571,14 @@ struct pohmelfs_config;
35338 struct pohmelfs_sb {
35339 struct rb_root mcache_root;
35340 struct mutex mcache_lock;
35341 - atomic_long_t mcache_gen;
35342 + atomic_long_unchecked_t mcache_gen;
35343 unsigned long mcache_timeout;
35344
35345 unsigned int idx;
35346
35347 unsigned int trans_retries;
35348
35349 - atomic_t trans_gen;
35350 + atomic_unchecked_t trans_gen;
35351
35352 unsigned int crypto_attached_size;
35353 unsigned int crypto_align_size;
35354 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
35355 index 06c1a74..866eebc 100644
35356 --- a/drivers/staging/pohmelfs/trans.c
35357 +++ b/drivers/staging/pohmelfs/trans.c
35358 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
35359 int err;
35360 struct netfs_cmd *cmd = t->iovec.iov_base;
35361
35362 - t->gen = atomic_inc_return(&psb->trans_gen);
35363 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35364
35365 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35366 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35367 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
35368 index 86308a0..feaa925 100644
35369 --- a/drivers/staging/rtl8712/rtl871x_io.h
35370 +++ b/drivers/staging/rtl8712/rtl871x_io.h
35371 @@ -108,7 +108,7 @@ struct _io_ops {
35372 u8 *pmem);
35373 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35374 u8 *pmem);
35375 -};
35376 +} __no_const;
35377
35378 struct io_req {
35379 struct list_head list;
35380 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
35381 index c7b5e8b..783d6cb 100644
35382 --- a/drivers/staging/sbe-2t3e3/netdev.c
35383 +++ b/drivers/staging/sbe-2t3e3/netdev.c
35384 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35385 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35386
35387 if (rlen)
35388 - if (copy_to_user(data, &resp, rlen))
35389 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35390 return -EFAULT;
35391
35392 return 0;
35393 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
35394 index be21617..0954e45 100644
35395 --- a/drivers/staging/usbip/usbip_common.h
35396 +++ b/drivers/staging/usbip/usbip_common.h
35397 @@ -289,7 +289,7 @@ struct usbip_device {
35398 void (*shutdown)(struct usbip_device *);
35399 void (*reset)(struct usbip_device *);
35400 void (*unusable)(struct usbip_device *);
35401 - } eh_ops;
35402 + } __no_const eh_ops;
35403 };
35404
35405 #if 0
35406 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
35407 index 88b3298..3783eee 100644
35408 --- a/drivers/staging/usbip/vhci.h
35409 +++ b/drivers/staging/usbip/vhci.h
35410 @@ -88,7 +88,7 @@ struct vhci_hcd {
35411 unsigned resuming:1;
35412 unsigned long re_timeout;
35413
35414 - atomic_t seqnum;
35415 + atomic_unchecked_t seqnum;
35416
35417 /*
35418 * NOTE:
35419 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
35420 index 2ee97e2..0420b86 100644
35421 --- a/drivers/staging/usbip/vhci_hcd.c
35422 +++ b/drivers/staging/usbip/vhci_hcd.c
35423 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35424 return;
35425 }
35426
35427 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35428 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35429 if (priv->seqnum == 0xffff)
35430 dev_info(&urb->dev->dev, "seqnum max\n");
35431
35432 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
35433 return -ENOMEM;
35434 }
35435
35436 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35437 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35438 if (unlink->seqnum == 0xffff)
35439 pr_info("seqnum max\n");
35440
35441 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
35442 vdev->rhport = rhport;
35443 }
35444
35445 - atomic_set(&vhci->seqnum, 0);
35446 + atomic_set_unchecked(&vhci->seqnum, 0);
35447 spin_lock_init(&vhci->lock);
35448
35449 hcd->power_budget = 0; /* no limit */
35450 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
35451 index 3872b8c..fe6d2f4 100644
35452 --- a/drivers/staging/usbip/vhci_rx.c
35453 +++ b/drivers/staging/usbip/vhci_rx.c
35454 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
35455 if (!urb) {
35456 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35457 pr_info("max seqnum %d\n",
35458 - atomic_read(&the_controller->seqnum));
35459 + atomic_read_unchecked(&the_controller->seqnum));
35460 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35461 return;
35462 }
35463 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
35464 index 7735027..30eed13 100644
35465 --- a/drivers/staging/vt6655/hostap.c
35466 +++ b/drivers/staging/vt6655/hostap.c
35467 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
35468 *
35469 */
35470
35471 +static net_device_ops_no_const apdev_netdev_ops;
35472 +
35473 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35474 {
35475 PSDevice apdev_priv;
35476 struct net_device *dev = pDevice->dev;
35477 int ret;
35478 - const struct net_device_ops apdev_netdev_ops = {
35479 - .ndo_start_xmit = pDevice->tx_80211,
35480 - };
35481
35482 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35483
35484 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35485 *apdev_priv = *pDevice;
35486 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35487
35488 + /* only half broken now */
35489 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35490 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35491
35492 pDevice->apdev->type = ARPHRD_IEEE80211;
35493 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
35494 index 51b5adf..098e320 100644
35495 --- a/drivers/staging/vt6656/hostap.c
35496 +++ b/drivers/staging/vt6656/hostap.c
35497 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
35498 *
35499 */
35500
35501 +static net_device_ops_no_const apdev_netdev_ops;
35502 +
35503 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35504 {
35505 PSDevice apdev_priv;
35506 struct net_device *dev = pDevice->dev;
35507 int ret;
35508 - const struct net_device_ops apdev_netdev_ops = {
35509 - .ndo_start_xmit = pDevice->tx_80211,
35510 - };
35511
35512 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35513
35514 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35515 *apdev_priv = *pDevice;
35516 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35517
35518 + /* only half broken now */
35519 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35520 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35521
35522 pDevice->apdev->type = ARPHRD_IEEE80211;
35523 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
35524 index 7843dfd..3db105f 100644
35525 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
35526 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
35527 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
35528
35529 struct usbctlx_completor {
35530 int (*complete) (struct usbctlx_completor *);
35531 -};
35532 +} __no_const;
35533
35534 static int
35535 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35536 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
35537 index 1ca66ea..76f1343 100644
35538 --- a/drivers/staging/zcache/tmem.c
35539 +++ b/drivers/staging/zcache/tmem.c
35540 @@ -39,7 +39,7 @@
35541 * A tmem host implementation must use this function to register callbacks
35542 * for memory allocation.
35543 */
35544 -static struct tmem_hostops tmem_hostops;
35545 +static tmem_hostops_no_const tmem_hostops;
35546
35547 static void tmem_objnode_tree_init(void);
35548
35549 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
35550 * A tmem host implementation must use this function to register
35551 * callbacks for a page-accessible memory (PAM) implementation
35552 */
35553 -static struct tmem_pamops tmem_pamops;
35554 +static tmem_pamops_no_const tmem_pamops;
35555
35556 void tmem_register_pamops(struct tmem_pamops *m)
35557 {
35558 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
35559 index ed147c4..94fc3c6 100644
35560 --- a/drivers/staging/zcache/tmem.h
35561 +++ b/drivers/staging/zcache/tmem.h
35562 @@ -180,6 +180,7 @@ struct tmem_pamops {
35563 void (*new_obj)(struct tmem_obj *);
35564 int (*replace_in_obj)(void *, struct tmem_obj *);
35565 };
35566 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35567 extern void tmem_register_pamops(struct tmem_pamops *m);
35568
35569 /* memory allocation methods provided by the host implementation */
35570 @@ -189,6 +190,7 @@ struct tmem_hostops {
35571 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35572 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35573 };
35574 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35575 extern void tmem_register_hostops(struct tmem_hostops *m);
35576
35577 /* core tmem accessor functions */
35578 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
35579 index 0c1d5c73..88e90a8 100644
35580 --- a/drivers/target/iscsi/iscsi_target.c
35581 +++ b/drivers/target/iscsi/iscsi_target.c
35582 @@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
35583 * outstanding_r2ts reaches zero, go ahead and send the delayed
35584 * TASK_ABORTED status.
35585 */
35586 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35587 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35588 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35589 if (--cmd->outstanding_r2ts < 1) {
35590 iscsit_stop_dataout_timer(cmd);
35591 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
35592 index 6845228..df77141 100644
35593 --- a/drivers/target/target_core_tmr.c
35594 +++ b/drivers/target/target_core_tmr.c
35595 @@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
35596 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35597 cmd->t_task_list_num,
35598 atomic_read(&cmd->t_task_cdbs_left),
35599 - atomic_read(&cmd->t_task_cdbs_sent),
35600 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35601 atomic_read(&cmd->t_transport_active),
35602 atomic_read(&cmd->t_transport_stop),
35603 atomic_read(&cmd->t_transport_sent));
35604 @@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
35605 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35606 " task: %p, t_fe_count: %d dev: %p\n", task,
35607 fe_count, dev);
35608 - atomic_set(&cmd->t_transport_aborted, 1);
35609 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35610 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35611
35612 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35613 @@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
35614 }
35615 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35616 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35617 - atomic_set(&cmd->t_transport_aborted, 1);
35618 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35619 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35620
35621 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35622 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
35623 index 861628e..659ae80 100644
35624 --- a/drivers/target/target_core_transport.c
35625 +++ b/drivers/target/target_core_transport.c
35626 @@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
35627
35628 dev->queue_depth = dev_limits->queue_depth;
35629 atomic_set(&dev->depth_left, dev->queue_depth);
35630 - atomic_set(&dev->dev_ordered_id, 0);
35631 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
35632
35633 se_dev_set_default_attribs(dev, dev_limits);
35634
35635 @@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
35636 * Used to determine when ORDERED commands should go from
35637 * Dormant to Active status.
35638 */
35639 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35640 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35641 smp_mb__after_atomic_inc();
35642 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35643 cmd->se_ordered_id, cmd->sam_task_attr,
35644 @@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
35645 " t_transport_active: %d t_transport_stop: %d"
35646 " t_transport_sent: %d\n", cmd->t_task_list_num,
35647 atomic_read(&cmd->t_task_cdbs_left),
35648 - atomic_read(&cmd->t_task_cdbs_sent),
35649 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35650 atomic_read(&cmd->t_task_cdbs_ex_left),
35651 atomic_read(&cmd->t_transport_active),
35652 atomic_read(&cmd->t_transport_stop),
35653 @@ -2089,9 +2089,9 @@ check_depth:
35654
35655 spin_lock_irqsave(&cmd->t_state_lock, flags);
35656 task->task_flags |= (TF_ACTIVE | TF_SENT);
35657 - atomic_inc(&cmd->t_task_cdbs_sent);
35658 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35659
35660 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
35661 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35662 cmd->t_task_list_num)
35663 atomic_set(&cmd->t_transport_sent, 1);
35664
35665 @@ -4273,7 +4273,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
35666 atomic_set(&cmd->transport_lun_stop, 0);
35667 }
35668 if (!atomic_read(&cmd->t_transport_active) ||
35669 - atomic_read(&cmd->t_transport_aborted)) {
35670 + atomic_read_unchecked(&cmd->t_transport_aborted)) {
35671 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35672 return false;
35673 }
35674 @@ -4522,7 +4522,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
35675 {
35676 int ret = 0;
35677
35678 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
35679 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35680 if (!send_status ||
35681 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35682 return 1;
35683 @@ -4559,7 +4559,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
35684 */
35685 if (cmd->data_direction == DMA_TO_DEVICE) {
35686 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35687 - atomic_inc(&cmd->t_transport_aborted);
35688 + atomic_inc_unchecked(&cmd->t_transport_aborted);
35689 smp_mb__after_atomic_inc();
35690 }
35691 }
35692 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
35693 index b9040be..e3f5aab 100644
35694 --- a/drivers/tty/hvc/hvcs.c
35695 +++ b/drivers/tty/hvc/hvcs.c
35696 @@ -83,6 +83,7 @@
35697 #include <asm/hvcserver.h>
35698 #include <asm/uaccess.h>
35699 #include <asm/vio.h>
35700 +#include <asm/local.h>
35701
35702 /*
35703 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35704 @@ -270,7 +271,7 @@ struct hvcs_struct {
35705 unsigned int index;
35706
35707 struct tty_struct *tty;
35708 - int open_count;
35709 + local_t open_count;
35710
35711 /*
35712 * Used to tell the driver kernel_thread what operations need to take
35713 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
35714
35715 spin_lock_irqsave(&hvcsd->lock, flags);
35716
35717 - if (hvcsd->open_count > 0) {
35718 + if (local_read(&hvcsd->open_count) > 0) {
35719 spin_unlock_irqrestore(&hvcsd->lock, flags);
35720 printk(KERN_INFO "HVCS: vterm state unchanged. "
35721 "The hvcs device node is still in use.\n");
35722 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
35723 if ((retval = hvcs_partner_connect(hvcsd)))
35724 goto error_release;
35725
35726 - hvcsd->open_count = 1;
35727 + local_set(&hvcsd->open_count, 1);
35728 hvcsd->tty = tty;
35729 tty->driver_data = hvcsd;
35730
35731 @@ -1179,7 +1180,7 @@ fast_open:
35732
35733 spin_lock_irqsave(&hvcsd->lock, flags);
35734 kref_get(&hvcsd->kref);
35735 - hvcsd->open_count++;
35736 + local_inc(&hvcsd->open_count);
35737 hvcsd->todo_mask |= HVCS_SCHED_READ;
35738 spin_unlock_irqrestore(&hvcsd->lock, flags);
35739
35740 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35741 hvcsd = tty->driver_data;
35742
35743 spin_lock_irqsave(&hvcsd->lock, flags);
35744 - if (--hvcsd->open_count == 0) {
35745 + if (local_dec_and_test(&hvcsd->open_count)) {
35746
35747 vio_disable_interrupts(hvcsd->vdev);
35748
35749 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35750 free_irq(irq, hvcsd);
35751 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35752 return;
35753 - } else if (hvcsd->open_count < 0) {
35754 + } else if (local_read(&hvcsd->open_count) < 0) {
35755 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35756 " is missmanaged.\n",
35757 - hvcsd->vdev->unit_address, hvcsd->open_count);
35758 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35759 }
35760
35761 spin_unlock_irqrestore(&hvcsd->lock, flags);
35762 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35763
35764 spin_lock_irqsave(&hvcsd->lock, flags);
35765 /* Preserve this so that we know how many kref refs to put */
35766 - temp_open_count = hvcsd->open_count;
35767 + temp_open_count = local_read(&hvcsd->open_count);
35768
35769 /*
35770 * Don't kref put inside the spinlock because the destruction
35771 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35772 hvcsd->tty->driver_data = NULL;
35773 hvcsd->tty = NULL;
35774
35775 - hvcsd->open_count = 0;
35776 + local_set(&hvcsd->open_count, 0);
35777
35778 /* This will drop any buffered data on the floor which is OK in a hangup
35779 * scenario. */
35780 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
35781 * the middle of a write operation? This is a crummy place to do this
35782 * but we want to keep it all in the spinlock.
35783 */
35784 - if (hvcsd->open_count <= 0) {
35785 + if (local_read(&hvcsd->open_count) <= 0) {
35786 spin_unlock_irqrestore(&hvcsd->lock, flags);
35787 return -ENODEV;
35788 }
35789 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
35790 {
35791 struct hvcs_struct *hvcsd = tty->driver_data;
35792
35793 - if (!hvcsd || hvcsd->open_count <= 0)
35794 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35795 return 0;
35796
35797 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35798 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
35799 index ef92869..f4ebd88 100644
35800 --- a/drivers/tty/ipwireless/tty.c
35801 +++ b/drivers/tty/ipwireless/tty.c
35802 @@ -29,6 +29,7 @@
35803 #include <linux/tty_driver.h>
35804 #include <linux/tty_flip.h>
35805 #include <linux/uaccess.h>
35806 +#include <asm/local.h>
35807
35808 #include "tty.h"
35809 #include "network.h"
35810 @@ -51,7 +52,7 @@ struct ipw_tty {
35811 int tty_type;
35812 struct ipw_network *network;
35813 struct tty_struct *linux_tty;
35814 - int open_count;
35815 + local_t open_count;
35816 unsigned int control_lines;
35817 struct mutex ipw_tty_mutex;
35818 int tx_bytes_queued;
35819 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35820 mutex_unlock(&tty->ipw_tty_mutex);
35821 return -ENODEV;
35822 }
35823 - if (tty->open_count == 0)
35824 + if (local_read(&tty->open_count) == 0)
35825 tty->tx_bytes_queued = 0;
35826
35827 - tty->open_count++;
35828 + local_inc(&tty->open_count);
35829
35830 tty->linux_tty = linux_tty;
35831 linux_tty->driver_data = tty;
35832 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35833
35834 static void do_ipw_close(struct ipw_tty *tty)
35835 {
35836 - tty->open_count--;
35837 -
35838 - if (tty->open_count == 0) {
35839 + if (local_dec_return(&tty->open_count) == 0) {
35840 struct tty_struct *linux_tty = tty->linux_tty;
35841
35842 if (linux_tty != NULL) {
35843 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
35844 return;
35845
35846 mutex_lock(&tty->ipw_tty_mutex);
35847 - if (tty->open_count == 0) {
35848 + if (local_read(&tty->open_count) == 0) {
35849 mutex_unlock(&tty->ipw_tty_mutex);
35850 return;
35851 }
35852 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
35853 return;
35854 }
35855
35856 - if (!tty->open_count) {
35857 + if (!local_read(&tty->open_count)) {
35858 mutex_unlock(&tty->ipw_tty_mutex);
35859 return;
35860 }
35861 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
35862 return -ENODEV;
35863
35864 mutex_lock(&tty->ipw_tty_mutex);
35865 - if (!tty->open_count) {
35866 + if (!local_read(&tty->open_count)) {
35867 mutex_unlock(&tty->ipw_tty_mutex);
35868 return -EINVAL;
35869 }
35870 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
35871 if (!tty)
35872 return -ENODEV;
35873
35874 - if (!tty->open_count)
35875 + if (!local_read(&tty->open_count))
35876 return -EINVAL;
35877
35878 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35879 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
35880 if (!tty)
35881 return 0;
35882
35883 - if (!tty->open_count)
35884 + if (!local_read(&tty->open_count))
35885 return 0;
35886
35887 return tty->tx_bytes_queued;
35888 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
35889 if (!tty)
35890 return -ENODEV;
35891
35892 - if (!tty->open_count)
35893 + if (!local_read(&tty->open_count))
35894 return -EINVAL;
35895
35896 return get_control_lines(tty);
35897 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
35898 if (!tty)
35899 return -ENODEV;
35900
35901 - if (!tty->open_count)
35902 + if (!local_read(&tty->open_count))
35903 return -EINVAL;
35904
35905 return set_control_lines(tty, set, clear);
35906 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
35907 if (!tty)
35908 return -ENODEV;
35909
35910 - if (!tty->open_count)
35911 + if (!local_read(&tty->open_count))
35912 return -EINVAL;
35913
35914 /* FIXME: Exactly how is the tty object locked here .. */
35915 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
35916 against a parallel ioctl etc */
35917 mutex_lock(&ttyj->ipw_tty_mutex);
35918 }
35919 - while (ttyj->open_count)
35920 + while (local_read(&ttyj->open_count))
35921 do_ipw_close(ttyj);
35922 ipwireless_disassociate_network_ttys(network,
35923 ttyj->channel_idx);
35924 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
35925 index fc7bbba..9527e93 100644
35926 --- a/drivers/tty/n_gsm.c
35927 +++ b/drivers/tty/n_gsm.c
35928 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
35929 kref_init(&dlci->ref);
35930 mutex_init(&dlci->mutex);
35931 dlci->fifo = &dlci->_fifo;
35932 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35933 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35934 kfree(dlci);
35935 return NULL;
35936 }
35937 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
35938 index 39d6ab6..eb97f41 100644
35939 --- a/drivers/tty/n_tty.c
35940 +++ b/drivers/tty/n_tty.c
35941 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
35942 {
35943 *ops = tty_ldisc_N_TTY;
35944 ops->owner = NULL;
35945 - ops->refcount = ops->flags = 0;
35946 + atomic_set(&ops->refcount, 0);
35947 + ops->flags = 0;
35948 }
35949 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35950 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
35951 index e18604b..a7d5a11 100644
35952 --- a/drivers/tty/pty.c
35953 +++ b/drivers/tty/pty.c
35954 @@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35955 register_sysctl_table(pty_root_table);
35956
35957 /* Now create the /dev/ptmx special device */
35958 + pax_open_kernel();
35959 tty_default_fops(&ptmx_fops);
35960 - ptmx_fops.open = ptmx_open;
35961 + *(void **)&ptmx_fops.open = ptmx_open;
35962 + pax_close_kernel();
35963
35964 cdev_init(&ptmx_cdev, &ptmx_fops);
35965 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35966 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
35967 index 2b42a01..32a2ed3 100644
35968 --- a/drivers/tty/serial/kgdboc.c
35969 +++ b/drivers/tty/serial/kgdboc.c
35970 @@ -24,8 +24,9 @@
35971 #define MAX_CONFIG_LEN 40
35972
35973 static struct kgdb_io kgdboc_io_ops;
35974 +static struct kgdb_io kgdboc_io_ops_console;
35975
35976 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35977 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35978 static int configured = -1;
35979
35980 static char config[MAX_CONFIG_LEN];
35981 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
35982 kgdboc_unregister_kbd();
35983 if (configured == 1)
35984 kgdb_unregister_io_module(&kgdboc_io_ops);
35985 + else if (configured == 2)
35986 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
35987 }
35988
35989 static int configure_kgdboc(void)
35990 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
35991 int err;
35992 char *cptr = config;
35993 struct console *cons;
35994 + int is_console = 0;
35995
35996 err = kgdboc_option_setup(config);
35997 if (err || !strlen(config) || isspace(config[0]))
35998 goto noconfig;
35999
36000 err = -ENODEV;
36001 - kgdboc_io_ops.is_console = 0;
36002 kgdb_tty_driver = NULL;
36003
36004 kgdboc_use_kms = 0;
36005 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
36006 int idx;
36007 if (cons->device && cons->device(cons, &idx) == p &&
36008 idx == tty_line) {
36009 - kgdboc_io_ops.is_console = 1;
36010 + is_console = 1;
36011 break;
36012 }
36013 cons = cons->next;
36014 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
36015 kgdb_tty_line = tty_line;
36016
36017 do_register:
36018 - err = kgdb_register_io_module(&kgdboc_io_ops);
36019 + if (is_console) {
36020 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
36021 + configured = 2;
36022 + } else {
36023 + err = kgdb_register_io_module(&kgdboc_io_ops);
36024 + configured = 1;
36025 + }
36026 if (err)
36027 goto noconfig;
36028
36029 - configured = 1;
36030 -
36031 return 0;
36032
36033 noconfig:
36034 @@ -213,7 +220,7 @@ noconfig:
36035 static int __init init_kgdboc(void)
36036 {
36037 /* Already configured? */
36038 - if (configured == 1)
36039 + if (configured >= 1)
36040 return 0;
36041
36042 return configure_kgdboc();
36043 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
36044 if (config[len - 1] == '\n')
36045 config[len - 1] = '\0';
36046
36047 - if (configured == 1)
36048 + if (configured >= 1)
36049 cleanup_kgdboc();
36050
36051 /* Go and configure with the new params. */
36052 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
36053 .post_exception = kgdboc_post_exp_handler,
36054 };
36055
36056 +static struct kgdb_io kgdboc_io_ops_console = {
36057 + .name = "kgdboc",
36058 + .read_char = kgdboc_get_char,
36059 + .write_char = kgdboc_put_char,
36060 + .pre_exception = kgdboc_pre_exp_handler,
36061 + .post_exception = kgdboc_post_exp_handler,
36062 + .is_console = 1
36063 +};
36064 +
36065 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
36066 /* This is only available if kgdboc is a built in for early debugging */
36067 static int __init kgdboc_early_init(char *opt)
36068 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
36069 index 05085be..67eadb0 100644
36070 --- a/drivers/tty/tty_io.c
36071 +++ b/drivers/tty/tty_io.c
36072 @@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
36073
36074 void tty_default_fops(struct file_operations *fops)
36075 {
36076 - *fops = tty_fops;
36077 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
36078 }
36079
36080 /*
36081 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
36082 index 8e0924f..4204eb4 100644
36083 --- a/drivers/tty/tty_ldisc.c
36084 +++ b/drivers/tty/tty_ldisc.c
36085 @@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
36086 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
36087 struct tty_ldisc_ops *ldo = ld->ops;
36088
36089 - ldo->refcount--;
36090 + atomic_dec(&ldo->refcount);
36091 module_put(ldo->owner);
36092 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36093
36094 @@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
36095 spin_lock_irqsave(&tty_ldisc_lock, flags);
36096 tty_ldiscs[disc] = new_ldisc;
36097 new_ldisc->num = disc;
36098 - new_ldisc->refcount = 0;
36099 + atomic_set(&new_ldisc->refcount, 0);
36100 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36101
36102 return ret;
36103 @@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
36104 return -EINVAL;
36105
36106 spin_lock_irqsave(&tty_ldisc_lock, flags);
36107 - if (tty_ldiscs[disc]->refcount)
36108 + if (atomic_read(&tty_ldiscs[disc]->refcount))
36109 ret = -EBUSY;
36110 else
36111 tty_ldiscs[disc] = NULL;
36112 @@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
36113 if (ldops) {
36114 ret = ERR_PTR(-EAGAIN);
36115 if (try_module_get(ldops->owner)) {
36116 - ldops->refcount++;
36117 + atomic_inc(&ldops->refcount);
36118 ret = ldops;
36119 }
36120 }
36121 @@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
36122 unsigned long flags;
36123
36124 spin_lock_irqsave(&tty_ldisc_lock, flags);
36125 - ldops->refcount--;
36126 + atomic_dec(&ldops->refcount);
36127 module_put(ldops->owner);
36128 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36129 }
36130 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
36131 index a605549..6bd3c96 100644
36132 --- a/drivers/tty/vt/keyboard.c
36133 +++ b/drivers/tty/vt/keyboard.c
36134 @@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
36135 kbd->kbdmode == VC_OFF) &&
36136 value != KVAL(K_SAK))
36137 return; /* SAK is allowed even in raw mode */
36138 +
36139 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
36140 + {
36141 + void *func = fn_handler[value];
36142 + if (func == fn_show_state || func == fn_show_ptregs ||
36143 + func == fn_show_mem)
36144 + return;
36145 + }
36146 +#endif
36147 +
36148 fn_handler[value](vc);
36149 }
36150
36151 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
36152 index 65447c5..0526f0a 100644
36153 --- a/drivers/tty/vt/vt_ioctl.c
36154 +++ b/drivers/tty/vt/vt_ioctl.c
36155 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
36156 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
36157 return -EFAULT;
36158
36159 - if (!capable(CAP_SYS_TTY_CONFIG))
36160 - perm = 0;
36161 -
36162 switch (cmd) {
36163 case KDGKBENT:
36164 key_map = key_maps[s];
36165 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
36166 val = (i ? K_HOLE : K_NOSUCHMAP);
36167 return put_user(val, &user_kbe->kb_value);
36168 case KDSKBENT:
36169 + if (!capable(CAP_SYS_TTY_CONFIG))
36170 + perm = 0;
36171 +
36172 if (!perm)
36173 return -EPERM;
36174 if (!i && v == K_NOSUCHMAP) {
36175 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
36176 int i, j, k;
36177 int ret;
36178
36179 - if (!capable(CAP_SYS_TTY_CONFIG))
36180 - perm = 0;
36181 -
36182 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
36183 if (!kbs) {
36184 ret = -ENOMEM;
36185 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
36186 kfree(kbs);
36187 return ((p && *p) ? -EOVERFLOW : 0);
36188 case KDSKBSENT:
36189 + if (!capable(CAP_SYS_TTY_CONFIG))
36190 + perm = 0;
36191 +
36192 if (!perm) {
36193 ret = -EPERM;
36194 goto reterr;
36195 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
36196 index a783d53..cb30d94 100644
36197 --- a/drivers/uio/uio.c
36198 +++ b/drivers/uio/uio.c
36199 @@ -25,6 +25,7 @@
36200 #include <linux/kobject.h>
36201 #include <linux/cdev.h>
36202 #include <linux/uio_driver.h>
36203 +#include <asm/local.h>
36204
36205 #define UIO_MAX_DEVICES (1U << MINORBITS)
36206
36207 @@ -32,10 +33,10 @@ struct uio_device {
36208 struct module *owner;
36209 struct device *dev;
36210 int minor;
36211 - atomic_t event;
36212 + atomic_unchecked_t event;
36213 struct fasync_struct *async_queue;
36214 wait_queue_head_t wait;
36215 - int vma_count;
36216 + local_t vma_count;
36217 struct uio_info *info;
36218 struct kobject *map_dir;
36219 struct kobject *portio_dir;
36220 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
36221 struct device_attribute *attr, char *buf)
36222 {
36223 struct uio_device *idev = dev_get_drvdata(dev);
36224 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
36225 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
36226 }
36227
36228 static struct device_attribute uio_class_attributes[] = {
36229 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
36230 {
36231 struct uio_device *idev = info->uio_dev;
36232
36233 - atomic_inc(&idev->event);
36234 + atomic_inc_unchecked(&idev->event);
36235 wake_up_interruptible(&idev->wait);
36236 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
36237 }
36238 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
36239 }
36240
36241 listener->dev = idev;
36242 - listener->event_count = atomic_read(&idev->event);
36243 + listener->event_count = atomic_read_unchecked(&idev->event);
36244 filep->private_data = listener;
36245
36246 if (idev->info->open) {
36247 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
36248 return -EIO;
36249
36250 poll_wait(filep, &idev->wait, wait);
36251 - if (listener->event_count != atomic_read(&idev->event))
36252 + if (listener->event_count != atomic_read_unchecked(&idev->event))
36253 return POLLIN | POLLRDNORM;
36254 return 0;
36255 }
36256 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
36257 do {
36258 set_current_state(TASK_INTERRUPTIBLE);
36259
36260 - event_count = atomic_read(&idev->event);
36261 + event_count = atomic_read_unchecked(&idev->event);
36262 if (event_count != listener->event_count) {
36263 if (copy_to_user(buf, &event_count, count))
36264 retval = -EFAULT;
36265 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
36266 static void uio_vma_open(struct vm_area_struct *vma)
36267 {
36268 struct uio_device *idev = vma->vm_private_data;
36269 - idev->vma_count++;
36270 + local_inc(&idev->vma_count);
36271 }
36272
36273 static void uio_vma_close(struct vm_area_struct *vma)
36274 {
36275 struct uio_device *idev = vma->vm_private_data;
36276 - idev->vma_count--;
36277 + local_dec(&idev->vma_count);
36278 }
36279
36280 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36281 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
36282 idev->owner = owner;
36283 idev->info = info;
36284 init_waitqueue_head(&idev->wait);
36285 - atomic_set(&idev->event, 0);
36286 + atomic_set_unchecked(&idev->event, 0);
36287
36288 ret = uio_get_minor(idev);
36289 if (ret)
36290 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
36291 index a845f8b..4f54072 100644
36292 --- a/drivers/usb/atm/cxacru.c
36293 +++ b/drivers/usb/atm/cxacru.c
36294 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
36295 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
36296 if (ret < 2)
36297 return -EINVAL;
36298 - if (index < 0 || index > 0x7f)
36299 + if (index > 0x7f)
36300 return -EINVAL;
36301 pos += tmp;
36302
36303 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
36304 index d3448ca..d2864ca 100644
36305 --- a/drivers/usb/atm/usbatm.c
36306 +++ b/drivers/usb/atm/usbatm.c
36307 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36308 if (printk_ratelimit())
36309 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36310 __func__, vpi, vci);
36311 - atomic_inc(&vcc->stats->rx_err);
36312 + atomic_inc_unchecked(&vcc->stats->rx_err);
36313 return;
36314 }
36315
36316 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36317 if (length > ATM_MAX_AAL5_PDU) {
36318 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36319 __func__, length, vcc);
36320 - atomic_inc(&vcc->stats->rx_err);
36321 + atomic_inc_unchecked(&vcc->stats->rx_err);
36322 goto out;
36323 }
36324
36325 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36326 if (sarb->len < pdu_length) {
36327 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36328 __func__, pdu_length, sarb->len, vcc);
36329 - atomic_inc(&vcc->stats->rx_err);
36330 + atomic_inc_unchecked(&vcc->stats->rx_err);
36331 goto out;
36332 }
36333
36334 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36335 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36336 __func__, vcc);
36337 - atomic_inc(&vcc->stats->rx_err);
36338 + atomic_inc_unchecked(&vcc->stats->rx_err);
36339 goto out;
36340 }
36341
36342 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36343 if (printk_ratelimit())
36344 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36345 __func__, length);
36346 - atomic_inc(&vcc->stats->rx_drop);
36347 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36348 goto out;
36349 }
36350
36351 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36352
36353 vcc->push(vcc, skb);
36354
36355 - atomic_inc(&vcc->stats->rx);
36356 + atomic_inc_unchecked(&vcc->stats->rx);
36357 out:
36358 skb_trim(sarb, 0);
36359 }
36360 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
36361 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36362
36363 usbatm_pop(vcc, skb);
36364 - atomic_inc(&vcc->stats->tx);
36365 + atomic_inc_unchecked(&vcc->stats->tx);
36366
36367 skb = skb_dequeue(&instance->sndqueue);
36368 }
36369 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
36370 if (!left--)
36371 return sprintf(page,
36372 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36373 - atomic_read(&atm_dev->stats.aal5.tx),
36374 - atomic_read(&atm_dev->stats.aal5.tx_err),
36375 - atomic_read(&atm_dev->stats.aal5.rx),
36376 - atomic_read(&atm_dev->stats.aal5.rx_err),
36377 - atomic_read(&atm_dev->stats.aal5.rx_drop));
36378 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36379 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36380 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36381 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36382 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36383
36384 if (!left--) {
36385 if (instance->disconnected)
36386 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
36387 index d956965..4179a77 100644
36388 --- a/drivers/usb/core/devices.c
36389 +++ b/drivers/usb/core/devices.c
36390 @@ -126,7 +126,7 @@ static const char format_endpt[] =
36391 * time it gets called.
36392 */
36393 static struct device_connect_event {
36394 - atomic_t count;
36395 + atomic_unchecked_t count;
36396 wait_queue_head_t wait;
36397 } device_event = {
36398 .count = ATOMIC_INIT(1),
36399 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
36400
36401 void usbfs_conn_disc_event(void)
36402 {
36403 - atomic_add(2, &device_event.count);
36404 + atomic_add_unchecked(2, &device_event.count);
36405 wake_up(&device_event.wait);
36406 }
36407
36408 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
36409
36410 poll_wait(file, &device_event.wait, wait);
36411
36412 - event_count = atomic_read(&device_event.count);
36413 + event_count = atomic_read_unchecked(&device_event.count);
36414 if (file->f_version != event_count) {
36415 file->f_version = event_count;
36416 return POLLIN | POLLRDNORM;
36417 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
36418 index b3bdfed..a9460e0 100644
36419 --- a/drivers/usb/core/message.c
36420 +++ b/drivers/usb/core/message.c
36421 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
36422 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36423 if (buf) {
36424 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36425 - if (len > 0) {
36426 - smallbuf = kmalloc(++len, GFP_NOIO);
36427 + if (len++ > 0) {
36428 + smallbuf = kmalloc(len, GFP_NOIO);
36429 if (!smallbuf)
36430 return buf;
36431 memcpy(smallbuf, buf, len);
36432 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
36433 index 1fc8f12..20647c1 100644
36434 --- a/drivers/usb/early/ehci-dbgp.c
36435 +++ b/drivers/usb/early/ehci-dbgp.c
36436 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
36437
36438 #ifdef CONFIG_KGDB
36439 static struct kgdb_io kgdbdbgp_io_ops;
36440 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36441 +static struct kgdb_io kgdbdbgp_io_ops_console;
36442 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36443 #else
36444 #define dbgp_kgdb_mode (0)
36445 #endif
36446 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
36447 .write_char = kgdbdbgp_write_char,
36448 };
36449
36450 +static struct kgdb_io kgdbdbgp_io_ops_console = {
36451 + .name = "kgdbdbgp",
36452 + .read_char = kgdbdbgp_read_char,
36453 + .write_char = kgdbdbgp_write_char,
36454 + .is_console = 1
36455 +};
36456 +
36457 static int kgdbdbgp_wait_time;
36458
36459 static int __init kgdbdbgp_parse_config(char *str)
36460 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
36461 ptr++;
36462 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36463 }
36464 - kgdb_register_io_module(&kgdbdbgp_io_ops);
36465 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36466 + if (early_dbgp_console.index != -1)
36467 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36468 + else
36469 + kgdb_register_io_module(&kgdbdbgp_io_ops);
36470
36471 return 0;
36472 }
36473 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
36474 index d6bea3e..60b250e 100644
36475 --- a/drivers/usb/wusbcore/wa-hc.h
36476 +++ b/drivers/usb/wusbcore/wa-hc.h
36477 @@ -192,7 +192,7 @@ struct wahc {
36478 struct list_head xfer_delayed_list;
36479 spinlock_t xfer_list_lock;
36480 struct work_struct xfer_work;
36481 - atomic_t xfer_id_count;
36482 + atomic_unchecked_t xfer_id_count;
36483 };
36484
36485
36486 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
36487 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36488 spin_lock_init(&wa->xfer_list_lock);
36489 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36490 - atomic_set(&wa->xfer_id_count, 1);
36491 + atomic_set_unchecked(&wa->xfer_id_count, 1);
36492 }
36493
36494 /**
36495 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
36496 index 57c01ab..8a05959 100644
36497 --- a/drivers/usb/wusbcore/wa-xfer.c
36498 +++ b/drivers/usb/wusbcore/wa-xfer.c
36499 @@ -296,7 +296,7 @@ out:
36500 */
36501 static void wa_xfer_id_init(struct wa_xfer *xfer)
36502 {
36503 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36504 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36505 }
36506
36507 /*
36508 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
36509 index c14c42b..f955cc2 100644
36510 --- a/drivers/vhost/vhost.c
36511 +++ b/drivers/vhost/vhost.c
36512 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
36513 return 0;
36514 }
36515
36516 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36517 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36518 {
36519 struct file *eventfp, *filep = NULL,
36520 *pollstart = NULL, *pollstop = NULL;
36521 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
36522 index b0b2ac3..89a4399 100644
36523 --- a/drivers/video/aty/aty128fb.c
36524 +++ b/drivers/video/aty/aty128fb.c
36525 @@ -148,7 +148,7 @@ enum {
36526 };
36527
36528 /* Must match above enum */
36529 -static const char *r128_family[] __devinitdata = {
36530 +static const char *r128_family[] __devinitconst = {
36531 "AGP",
36532 "PCI",
36533 "PRO AGP",
36534 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
36535 index 5c3960d..15cf8fc 100644
36536 --- a/drivers/video/fbcmap.c
36537 +++ b/drivers/video/fbcmap.c
36538 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
36539 rc = -ENODEV;
36540 goto out;
36541 }
36542 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36543 - !info->fbops->fb_setcmap)) {
36544 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36545 rc = -EINVAL;
36546 goto out1;
36547 }
36548 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
36549 index ad93629..e020fc3 100644
36550 --- a/drivers/video/fbmem.c
36551 +++ b/drivers/video/fbmem.c
36552 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36553 image->dx += image->width + 8;
36554 }
36555 } else if (rotate == FB_ROTATE_UD) {
36556 - for (x = 0; x < num && image->dx >= 0; x++) {
36557 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36558 info->fbops->fb_imageblit(info, image);
36559 image->dx -= image->width + 8;
36560 }
36561 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36562 image->dy += image->height + 8;
36563 }
36564 } else if (rotate == FB_ROTATE_CCW) {
36565 - for (x = 0; x < num && image->dy >= 0; x++) {
36566 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36567 info->fbops->fb_imageblit(info, image);
36568 image->dy -= image->height + 8;
36569 }
36570 @@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
36571 return -EFAULT;
36572 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36573 return -EINVAL;
36574 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36575 + if (con2fb.framebuffer >= FB_MAX)
36576 return -EINVAL;
36577 if (!registered_fb[con2fb.framebuffer])
36578 request_module("fb%d", con2fb.framebuffer);
36579 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
36580 index 5a5d092..265c5ed 100644
36581 --- a/drivers/video/geode/gx1fb_core.c
36582 +++ b/drivers/video/geode/gx1fb_core.c
36583 @@ -29,7 +29,7 @@ static int crt_option = 1;
36584 static char panel_option[32] = "";
36585
36586 /* Modes relevant to the GX1 (taken from modedb.c) */
36587 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
36588 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
36589 /* 640x480-60 VESA */
36590 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36591 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36592 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
36593 index 0fad23f..0e9afa4 100644
36594 --- a/drivers/video/gxt4500.c
36595 +++ b/drivers/video/gxt4500.c
36596 @@ -156,7 +156,7 @@ struct gxt4500_par {
36597 static char *mode_option;
36598
36599 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36600 -static const struct fb_videomode defaultmode __devinitdata = {
36601 +static const struct fb_videomode defaultmode __devinitconst = {
36602 .refresh = 60,
36603 .xres = 1280,
36604 .yres = 1024,
36605 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
36606 return 0;
36607 }
36608
36609 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36610 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36611 .id = "IBM GXT4500P",
36612 .type = FB_TYPE_PACKED_PIXELS,
36613 .visual = FB_VISUAL_PSEUDOCOLOR,
36614 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
36615 index 7672d2e..b56437f 100644
36616 --- a/drivers/video/i810/i810_accel.c
36617 +++ b/drivers/video/i810/i810_accel.c
36618 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
36619 }
36620 }
36621 printk("ringbuffer lockup!!!\n");
36622 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36623 i810_report_error(mmio);
36624 par->dev_flags |= LOCKUP;
36625 info->pixmap.scan_align = 1;
36626 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
36627 index 318f6fb..9a389c1 100644
36628 --- a/drivers/video/i810/i810_main.c
36629 +++ b/drivers/video/i810/i810_main.c
36630 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
36631 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36632
36633 /* PCI */
36634 -static const char *i810_pci_list[] __devinitdata = {
36635 +static const char *i810_pci_list[] __devinitconst = {
36636 "Intel(R) 810 Framebuffer Device" ,
36637 "Intel(R) 810-DC100 Framebuffer Device" ,
36638 "Intel(R) 810E Framebuffer Device" ,
36639 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
36640 index de36693..3c63fc2 100644
36641 --- a/drivers/video/jz4740_fb.c
36642 +++ b/drivers/video/jz4740_fb.c
36643 @@ -136,7 +136,7 @@ struct jzfb {
36644 uint32_t pseudo_palette[16];
36645 };
36646
36647 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36648 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36649 .id = "JZ4740 FB",
36650 .type = FB_TYPE_PACKED_PIXELS,
36651 .visual = FB_VISUAL_TRUECOLOR,
36652 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
36653 index 3c14e43..eafa544 100644
36654 --- a/drivers/video/logo/logo_linux_clut224.ppm
36655 +++ b/drivers/video/logo/logo_linux_clut224.ppm
36656 @@ -1,1604 +1,1123 @@
36657 P3
36658 -# Standard 224-color Linux logo
36659 80 80
36660 255
36661 - 0 0 0 0 0 0 0 0 0 0 0 0
36662 - 0 0 0 0 0 0 0 0 0 0 0 0
36663 - 0 0 0 0 0 0 0 0 0 0 0 0
36664 - 0 0 0 0 0 0 0 0 0 0 0 0
36665 - 0 0 0 0 0 0 0 0 0 0 0 0
36666 - 0 0 0 0 0 0 0 0 0 0 0 0
36667 - 0 0 0 0 0 0 0 0 0 0 0 0
36668 - 0 0 0 0 0 0 0 0 0 0 0 0
36669 - 0 0 0 0 0 0 0 0 0 0 0 0
36670 - 6 6 6 6 6 6 10 10 10 10 10 10
36671 - 10 10 10 6 6 6 6 6 6 6 6 6
36672 - 0 0 0 0 0 0 0 0 0 0 0 0
36673 - 0 0 0 0 0 0 0 0 0 0 0 0
36674 - 0 0 0 0 0 0 0 0 0 0 0 0
36675 - 0 0 0 0 0 0 0 0 0 0 0 0
36676 - 0 0 0 0 0 0 0 0 0 0 0 0
36677 - 0 0 0 0 0 0 0 0 0 0 0 0
36678 - 0 0 0 0 0 0 0 0 0 0 0 0
36679 - 0 0 0 0 0 0 0 0 0 0 0 0
36680 - 0 0 0 0 0 0 0 0 0 0 0 0
36681 - 0 0 0 0 0 0 0 0 0 0 0 0
36682 - 0 0 0 0 0 0 0 0 0 0 0 0
36683 - 0 0 0 0 0 0 0 0 0 0 0 0
36684 - 0 0 0 0 0 0 0 0 0 0 0 0
36685 - 0 0 0 0 0 0 0 0 0 0 0 0
36686 - 0 0 0 0 0 0 0 0 0 0 0 0
36687 - 0 0 0 0 0 0 0 0 0 0 0 0
36688 - 0 0 0 0 0 0 0 0 0 0 0 0
36689 - 0 0 0 6 6 6 10 10 10 14 14 14
36690 - 22 22 22 26 26 26 30 30 30 34 34 34
36691 - 30 30 30 30 30 30 26 26 26 18 18 18
36692 - 14 14 14 10 10 10 6 6 6 0 0 0
36693 - 0 0 0 0 0 0 0 0 0 0 0 0
36694 - 0 0 0 0 0 0 0 0 0 0 0 0
36695 - 0 0 0 0 0 0 0 0 0 0 0 0
36696 - 0 0 0 0 0 0 0 0 0 0 0 0
36697 - 0 0 0 0 0 0 0 0 0 0 0 0
36698 - 0 0 0 0 0 0 0 0 0 0 0 0
36699 - 0 0 0 0 0 0 0 0 0 0 0 0
36700 - 0 0 0 0 0 0 0 0 0 0 0 0
36701 - 0 0 0 0 0 0 0 0 0 0 0 0
36702 - 0 0 0 0 0 1 0 0 1 0 0 0
36703 - 0 0 0 0 0 0 0 0 0 0 0 0
36704 - 0 0 0 0 0 0 0 0 0 0 0 0
36705 - 0 0 0 0 0 0 0 0 0 0 0 0
36706 - 0 0 0 0 0 0 0 0 0 0 0 0
36707 - 0 0 0 0 0 0 0 0 0 0 0 0
36708 - 0 0 0 0 0 0 0 0 0 0 0 0
36709 - 6 6 6 14 14 14 26 26 26 42 42 42
36710 - 54 54 54 66 66 66 78 78 78 78 78 78
36711 - 78 78 78 74 74 74 66 66 66 54 54 54
36712 - 42 42 42 26 26 26 18 18 18 10 10 10
36713 - 6 6 6 0 0 0 0 0 0 0 0 0
36714 - 0 0 0 0 0 0 0 0 0 0 0 0
36715 - 0 0 0 0 0 0 0 0 0 0 0 0
36716 - 0 0 0 0 0 0 0 0 0 0 0 0
36717 - 0 0 0 0 0 0 0 0 0 0 0 0
36718 - 0 0 0 0 0 0 0 0 0 0 0 0
36719 - 0 0 0 0 0 0 0 0 0 0 0 0
36720 - 0 0 0 0 0 0 0 0 0 0 0 0
36721 - 0 0 0 0 0 0 0 0 0 0 0 0
36722 - 0 0 1 0 0 0 0 0 0 0 0 0
36723 - 0 0 0 0 0 0 0 0 0 0 0 0
36724 - 0 0 0 0 0 0 0 0 0 0 0 0
36725 - 0 0 0 0 0 0 0 0 0 0 0 0
36726 - 0 0 0 0 0 0 0 0 0 0 0 0
36727 - 0 0 0 0 0 0 0 0 0 0 0 0
36728 - 0 0 0 0 0 0 0 0 0 10 10 10
36729 - 22 22 22 42 42 42 66 66 66 86 86 86
36730 - 66 66 66 38 38 38 38 38 38 22 22 22
36731 - 26 26 26 34 34 34 54 54 54 66 66 66
36732 - 86 86 86 70 70 70 46 46 46 26 26 26
36733 - 14 14 14 6 6 6 0 0 0 0 0 0
36734 - 0 0 0 0 0 0 0 0 0 0 0 0
36735 - 0 0 0 0 0 0 0 0 0 0 0 0
36736 - 0 0 0 0 0 0 0 0 0 0 0 0
36737 - 0 0 0 0 0 0 0 0 0 0 0 0
36738 - 0 0 0 0 0 0 0 0 0 0 0 0
36739 - 0 0 0 0 0 0 0 0 0 0 0 0
36740 - 0 0 0 0 0 0 0 0 0 0 0 0
36741 - 0 0 0 0 0 0 0 0 0 0 0 0
36742 - 0 0 1 0 0 1 0 0 1 0 0 0
36743 - 0 0 0 0 0 0 0 0 0 0 0 0
36744 - 0 0 0 0 0 0 0 0 0 0 0 0
36745 - 0 0 0 0 0 0 0 0 0 0 0 0
36746 - 0 0 0 0 0 0 0 0 0 0 0 0
36747 - 0 0 0 0 0 0 0 0 0 0 0 0
36748 - 0 0 0 0 0 0 10 10 10 26 26 26
36749 - 50 50 50 82 82 82 58 58 58 6 6 6
36750 - 2 2 6 2 2 6 2 2 6 2 2 6
36751 - 2 2 6 2 2 6 2 2 6 2 2 6
36752 - 6 6 6 54 54 54 86 86 86 66 66 66
36753 - 38 38 38 18 18 18 6 6 6 0 0 0
36754 - 0 0 0 0 0 0 0 0 0 0 0 0
36755 - 0 0 0 0 0 0 0 0 0 0 0 0
36756 - 0 0 0 0 0 0 0 0 0 0 0 0
36757 - 0 0 0 0 0 0 0 0 0 0 0 0
36758 - 0 0 0 0 0 0 0 0 0 0 0 0
36759 - 0 0 0 0 0 0 0 0 0 0 0 0
36760 - 0 0 0 0 0 0 0 0 0 0 0 0
36761 - 0 0 0 0 0 0 0 0 0 0 0 0
36762 - 0 0 0 0 0 0 0 0 0 0 0 0
36763 - 0 0 0 0 0 0 0 0 0 0 0 0
36764 - 0 0 0 0 0 0 0 0 0 0 0 0
36765 - 0 0 0 0 0 0 0 0 0 0 0 0
36766 - 0 0 0 0 0 0 0 0 0 0 0 0
36767 - 0 0 0 0 0 0 0 0 0 0 0 0
36768 - 0 0 0 6 6 6 22 22 22 50 50 50
36769 - 78 78 78 34 34 34 2 2 6 2 2 6
36770 - 2 2 6 2 2 6 2 2 6 2 2 6
36771 - 2 2 6 2 2 6 2 2 6 2 2 6
36772 - 2 2 6 2 2 6 6 6 6 70 70 70
36773 - 78 78 78 46 46 46 22 22 22 6 6 6
36774 - 0 0 0 0 0 0 0 0 0 0 0 0
36775 - 0 0 0 0 0 0 0 0 0 0 0 0
36776 - 0 0 0 0 0 0 0 0 0 0 0 0
36777 - 0 0 0 0 0 0 0 0 0 0 0 0
36778 - 0 0 0 0 0 0 0 0 0 0 0 0
36779 - 0 0 0 0 0 0 0 0 0 0 0 0
36780 - 0 0 0 0 0 0 0 0 0 0 0 0
36781 - 0 0 0 0 0 0 0 0 0 0 0 0
36782 - 0 0 1 0 0 1 0 0 1 0 0 0
36783 - 0 0 0 0 0 0 0 0 0 0 0 0
36784 - 0 0 0 0 0 0 0 0 0 0 0 0
36785 - 0 0 0 0 0 0 0 0 0 0 0 0
36786 - 0 0 0 0 0 0 0 0 0 0 0 0
36787 - 0 0 0 0 0 0 0 0 0 0 0 0
36788 - 6 6 6 18 18 18 42 42 42 82 82 82
36789 - 26 26 26 2 2 6 2 2 6 2 2 6
36790 - 2 2 6 2 2 6 2 2 6 2 2 6
36791 - 2 2 6 2 2 6 2 2 6 14 14 14
36792 - 46 46 46 34 34 34 6 6 6 2 2 6
36793 - 42 42 42 78 78 78 42 42 42 18 18 18
36794 - 6 6 6 0 0 0 0 0 0 0 0 0
36795 - 0 0 0 0 0 0 0 0 0 0 0 0
36796 - 0 0 0 0 0 0 0 0 0 0 0 0
36797 - 0 0 0 0 0 0 0 0 0 0 0 0
36798 - 0 0 0 0 0 0 0 0 0 0 0 0
36799 - 0 0 0 0 0 0 0 0 0 0 0 0
36800 - 0 0 0 0 0 0 0 0 0 0 0 0
36801 - 0 0 0 0 0 0 0 0 0 0 0 0
36802 - 0 0 1 0 0 0 0 0 1 0 0 0
36803 - 0 0 0 0 0 0 0 0 0 0 0 0
36804 - 0 0 0 0 0 0 0 0 0 0 0 0
36805 - 0 0 0 0 0 0 0 0 0 0 0 0
36806 - 0 0 0 0 0 0 0 0 0 0 0 0
36807 - 0 0 0 0 0 0 0 0 0 0 0 0
36808 - 10 10 10 30 30 30 66 66 66 58 58 58
36809 - 2 2 6 2 2 6 2 2 6 2 2 6
36810 - 2 2 6 2 2 6 2 2 6 2 2 6
36811 - 2 2 6 2 2 6 2 2 6 26 26 26
36812 - 86 86 86 101 101 101 46 46 46 10 10 10
36813 - 2 2 6 58 58 58 70 70 70 34 34 34
36814 - 10 10 10 0 0 0 0 0 0 0 0 0
36815 - 0 0 0 0 0 0 0 0 0 0 0 0
36816 - 0 0 0 0 0 0 0 0 0 0 0 0
36817 - 0 0 0 0 0 0 0 0 0 0 0 0
36818 - 0 0 0 0 0 0 0 0 0 0 0 0
36819 - 0 0 0 0 0 0 0 0 0 0 0 0
36820 - 0 0 0 0 0 0 0 0 0 0 0 0
36821 - 0 0 0 0 0 0 0 0 0 0 0 0
36822 - 0 0 1 0 0 1 0 0 1 0 0 0
36823 - 0 0 0 0 0 0 0 0 0 0 0 0
36824 - 0 0 0 0 0 0 0 0 0 0 0 0
36825 - 0 0 0 0 0 0 0 0 0 0 0 0
36826 - 0 0 0 0 0 0 0 0 0 0 0 0
36827 - 0 0 0 0 0 0 0 0 0 0 0 0
36828 - 14 14 14 42 42 42 86 86 86 10 10 10
36829 - 2 2 6 2 2 6 2 2 6 2 2 6
36830 - 2 2 6 2 2 6 2 2 6 2 2 6
36831 - 2 2 6 2 2 6 2 2 6 30 30 30
36832 - 94 94 94 94 94 94 58 58 58 26 26 26
36833 - 2 2 6 6 6 6 78 78 78 54 54 54
36834 - 22 22 22 6 6 6 0 0 0 0 0 0
36835 - 0 0 0 0 0 0 0 0 0 0 0 0
36836 - 0 0 0 0 0 0 0 0 0 0 0 0
36837 - 0 0 0 0 0 0 0 0 0 0 0 0
36838 - 0 0 0 0 0 0 0 0 0 0 0 0
36839 - 0 0 0 0 0 0 0 0 0 0 0 0
36840 - 0 0 0 0 0 0 0 0 0 0 0 0
36841 - 0 0 0 0 0 0 0 0 0 0 0 0
36842 - 0 0 0 0 0 0 0 0 0 0 0 0
36843 - 0 0 0 0 0 0 0 0 0 0 0 0
36844 - 0 0 0 0 0 0 0 0 0 0 0 0
36845 - 0 0 0 0 0 0 0 0 0 0 0 0
36846 - 0 0 0 0 0 0 0 0 0 0 0 0
36847 - 0 0 0 0 0 0 0 0 0 6 6 6
36848 - 22 22 22 62 62 62 62 62 62 2 2 6
36849 - 2 2 6 2 2 6 2 2 6 2 2 6
36850 - 2 2 6 2 2 6 2 2 6 2 2 6
36851 - 2 2 6 2 2 6 2 2 6 26 26 26
36852 - 54 54 54 38 38 38 18 18 18 10 10 10
36853 - 2 2 6 2 2 6 34 34 34 82 82 82
36854 - 38 38 38 14 14 14 0 0 0 0 0 0
36855 - 0 0 0 0 0 0 0 0 0 0 0 0
36856 - 0 0 0 0 0 0 0 0 0 0 0 0
36857 - 0 0 0 0 0 0 0 0 0 0 0 0
36858 - 0 0 0 0 0 0 0 0 0 0 0 0
36859 - 0 0 0 0 0 0 0 0 0 0 0 0
36860 - 0 0 0 0 0 0 0 0 0 0 0 0
36861 - 0 0 0 0 0 0 0 0 0 0 0 0
36862 - 0 0 0 0 0 1 0 0 1 0 0 0
36863 - 0 0 0 0 0 0 0 0 0 0 0 0
36864 - 0 0 0 0 0 0 0 0 0 0 0 0
36865 - 0 0 0 0 0 0 0 0 0 0 0 0
36866 - 0 0 0 0 0 0 0 0 0 0 0 0
36867 - 0 0 0 0 0 0 0 0 0 6 6 6
36868 - 30 30 30 78 78 78 30 30 30 2 2 6
36869 - 2 2 6 2 2 6 2 2 6 2 2 6
36870 - 2 2 6 2 2 6 2 2 6 2 2 6
36871 - 2 2 6 2 2 6 2 2 6 10 10 10
36872 - 10 10 10 2 2 6 2 2 6 2 2 6
36873 - 2 2 6 2 2 6 2 2 6 78 78 78
36874 - 50 50 50 18 18 18 6 6 6 0 0 0
36875 - 0 0 0 0 0 0 0 0 0 0 0 0
36876 - 0 0 0 0 0 0 0 0 0 0 0 0
36877 - 0 0 0 0 0 0 0 0 0 0 0 0
36878 - 0 0 0 0 0 0 0 0 0 0 0 0
36879 - 0 0 0 0 0 0 0 0 0 0 0 0
36880 - 0 0 0 0 0 0 0 0 0 0 0 0
36881 - 0 0 0 0 0 0 0 0 0 0 0 0
36882 - 0 0 1 0 0 0 0 0 0 0 0 0
36883 - 0 0 0 0 0 0 0 0 0 0 0 0
36884 - 0 0 0 0 0 0 0 0 0 0 0 0
36885 - 0 0 0 0 0 0 0 0 0 0 0 0
36886 - 0 0 0 0 0 0 0 0 0 0 0 0
36887 - 0 0 0 0 0 0 0 0 0 10 10 10
36888 - 38 38 38 86 86 86 14 14 14 2 2 6
36889 - 2 2 6 2 2 6 2 2 6 2 2 6
36890 - 2 2 6 2 2 6 2 2 6 2 2 6
36891 - 2 2 6 2 2 6 2 2 6 2 2 6
36892 - 2 2 6 2 2 6 2 2 6 2 2 6
36893 - 2 2 6 2 2 6 2 2 6 54 54 54
36894 - 66 66 66 26 26 26 6 6 6 0 0 0
36895 - 0 0 0 0 0 0 0 0 0 0 0 0
36896 - 0 0 0 0 0 0 0 0 0 0 0 0
36897 - 0 0 0 0 0 0 0 0 0 0 0 0
36898 - 0 0 0 0 0 0 0 0 0 0 0 0
36899 - 0 0 0 0 0 0 0 0 0 0 0 0
36900 - 0 0 0 0 0 0 0 0 0 0 0 0
36901 - 0 0 0 0 0 0 0 0 0 0 0 0
36902 - 0 0 0 0 0 1 0 0 1 0 0 0
36903 - 0 0 0 0 0 0 0 0 0 0 0 0
36904 - 0 0 0 0 0 0 0 0 0 0 0 0
36905 - 0 0 0 0 0 0 0 0 0 0 0 0
36906 - 0 0 0 0 0 0 0 0 0 0 0 0
36907 - 0 0 0 0 0 0 0 0 0 14 14 14
36908 - 42 42 42 82 82 82 2 2 6 2 2 6
36909 - 2 2 6 6 6 6 10 10 10 2 2 6
36910 - 2 2 6 2 2 6 2 2 6 2 2 6
36911 - 2 2 6 2 2 6 2 2 6 6 6 6
36912 - 14 14 14 10 10 10 2 2 6 2 2 6
36913 - 2 2 6 2 2 6 2 2 6 18 18 18
36914 - 82 82 82 34 34 34 10 10 10 0 0 0
36915 - 0 0 0 0 0 0 0 0 0 0 0 0
36916 - 0 0 0 0 0 0 0 0 0 0 0 0
36917 - 0 0 0 0 0 0 0 0 0 0 0 0
36918 - 0 0 0 0 0 0 0 0 0 0 0 0
36919 - 0 0 0 0 0 0 0 0 0 0 0 0
36920 - 0 0 0 0 0 0 0 0 0 0 0 0
36921 - 0 0 0 0 0 0 0 0 0 0 0 0
36922 - 0 0 1 0 0 0 0 0 0 0 0 0
36923 - 0 0 0 0 0 0 0 0 0 0 0 0
36924 - 0 0 0 0 0 0 0 0 0 0 0 0
36925 - 0 0 0 0 0 0 0 0 0 0 0 0
36926 - 0 0 0 0 0 0 0 0 0 0 0 0
36927 - 0 0 0 0 0 0 0 0 0 14 14 14
36928 - 46 46 46 86 86 86 2 2 6 2 2 6
36929 - 6 6 6 6 6 6 22 22 22 34 34 34
36930 - 6 6 6 2 2 6 2 2 6 2 2 6
36931 - 2 2 6 2 2 6 18 18 18 34 34 34
36932 - 10 10 10 50 50 50 22 22 22 2 2 6
36933 - 2 2 6 2 2 6 2 2 6 10 10 10
36934 - 86 86 86 42 42 42 14 14 14 0 0 0
36935 - 0 0 0 0 0 0 0 0 0 0 0 0
36936 - 0 0 0 0 0 0 0 0 0 0 0 0
36937 - 0 0 0 0 0 0 0 0 0 0 0 0
36938 - 0 0 0 0 0 0 0 0 0 0 0 0
36939 - 0 0 0 0 0 0 0 0 0 0 0 0
36940 - 0 0 0 0 0 0 0 0 0 0 0 0
36941 - 0 0 0 0 0 0 0 0 0 0 0 0
36942 - 0 0 1 0 0 1 0 0 1 0 0 0
36943 - 0 0 0 0 0 0 0 0 0 0 0 0
36944 - 0 0 0 0 0 0 0 0 0 0 0 0
36945 - 0 0 0 0 0 0 0 0 0 0 0 0
36946 - 0 0 0 0 0 0 0 0 0 0 0 0
36947 - 0 0 0 0 0 0 0 0 0 14 14 14
36948 - 46 46 46 86 86 86 2 2 6 2 2 6
36949 - 38 38 38 116 116 116 94 94 94 22 22 22
36950 - 22 22 22 2 2 6 2 2 6 2 2 6
36951 - 14 14 14 86 86 86 138 138 138 162 162 162
36952 -154 154 154 38 38 38 26 26 26 6 6 6
36953 - 2 2 6 2 2 6 2 2 6 2 2 6
36954 - 86 86 86 46 46 46 14 14 14 0 0 0
36955 - 0 0 0 0 0 0 0 0 0 0 0 0
36956 - 0 0 0 0 0 0 0 0 0 0 0 0
36957 - 0 0 0 0 0 0 0 0 0 0 0 0
36958 - 0 0 0 0 0 0 0 0 0 0 0 0
36959 - 0 0 0 0 0 0 0 0 0 0 0 0
36960 - 0 0 0 0 0 0 0 0 0 0 0 0
36961 - 0 0 0 0 0 0 0 0 0 0 0 0
36962 - 0 0 0 0 0 0 0 0 0 0 0 0
36963 - 0 0 0 0 0 0 0 0 0 0 0 0
36964 - 0 0 0 0 0 0 0 0 0 0 0 0
36965 - 0 0 0 0 0 0 0 0 0 0 0 0
36966 - 0 0 0 0 0 0 0 0 0 0 0 0
36967 - 0 0 0 0 0 0 0 0 0 14 14 14
36968 - 46 46 46 86 86 86 2 2 6 14 14 14
36969 -134 134 134 198 198 198 195 195 195 116 116 116
36970 - 10 10 10 2 2 6 2 2 6 6 6 6
36971 -101 98 89 187 187 187 210 210 210 218 218 218
36972 -214 214 214 134 134 134 14 14 14 6 6 6
36973 - 2 2 6 2 2 6 2 2 6 2 2 6
36974 - 86 86 86 50 50 50 18 18 18 6 6 6
36975 - 0 0 0 0 0 0 0 0 0 0 0 0
36976 - 0 0 0 0 0 0 0 0 0 0 0 0
36977 - 0 0 0 0 0 0 0 0 0 0 0 0
36978 - 0 0 0 0 0 0 0 0 0 0 0 0
36979 - 0 0 0 0 0 0 0 0 0 0 0 0
36980 - 0 0 0 0 0 0 0 0 0 0 0 0
36981 - 0 0 0 0 0 0 0 0 1 0 0 0
36982 - 0 0 1 0 0 1 0 0 1 0 0 0
36983 - 0 0 0 0 0 0 0 0 0 0 0 0
36984 - 0 0 0 0 0 0 0 0 0 0 0 0
36985 - 0 0 0 0 0 0 0 0 0 0 0 0
36986 - 0 0 0 0 0 0 0 0 0 0 0 0
36987 - 0 0 0 0 0 0 0 0 0 14 14 14
36988 - 46 46 46 86 86 86 2 2 6 54 54 54
36989 -218 218 218 195 195 195 226 226 226 246 246 246
36990 - 58 58 58 2 2 6 2 2 6 30 30 30
36991 -210 210 210 253 253 253 174 174 174 123 123 123
36992 -221 221 221 234 234 234 74 74 74 2 2 6
36993 - 2 2 6 2 2 6 2 2 6 2 2 6
36994 - 70 70 70 58 58 58 22 22 22 6 6 6
36995 - 0 0 0 0 0 0 0 0 0 0 0 0
36996 - 0 0 0 0 0 0 0 0 0 0 0 0
36997 - 0 0 0 0 0 0 0 0 0 0 0 0
36998 - 0 0 0 0 0 0 0 0 0 0 0 0
36999 - 0 0 0 0 0 0 0 0 0 0 0 0
37000 - 0 0 0 0 0 0 0 0 0 0 0 0
37001 - 0 0 0 0 0 0 0 0 0 0 0 0
37002 - 0 0 0 0 0 0 0 0 0 0 0 0
37003 - 0 0 0 0 0 0 0 0 0 0 0 0
37004 - 0 0 0 0 0 0 0 0 0 0 0 0
37005 - 0 0 0 0 0 0 0 0 0 0 0 0
37006 - 0 0 0 0 0 0 0 0 0 0 0 0
37007 - 0 0 0 0 0 0 0 0 0 14 14 14
37008 - 46 46 46 82 82 82 2 2 6 106 106 106
37009 -170 170 170 26 26 26 86 86 86 226 226 226
37010 -123 123 123 10 10 10 14 14 14 46 46 46
37011 -231 231 231 190 190 190 6 6 6 70 70 70
37012 - 90 90 90 238 238 238 158 158 158 2 2 6
37013 - 2 2 6 2 2 6 2 2 6 2 2 6
37014 - 70 70 70 58 58 58 22 22 22 6 6 6
37015 - 0 0 0 0 0 0 0 0 0 0 0 0
37016 - 0 0 0 0 0 0 0 0 0 0 0 0
37017 - 0 0 0 0 0 0 0 0 0 0 0 0
37018 - 0 0 0 0 0 0 0 0 0 0 0 0
37019 - 0 0 0 0 0 0 0 0 0 0 0 0
37020 - 0 0 0 0 0 0 0 0 0 0 0 0
37021 - 0 0 0 0 0 0 0 0 1 0 0 0
37022 - 0 0 1 0 0 1 0 0 1 0 0 0
37023 - 0 0 0 0 0 0 0 0 0 0 0 0
37024 - 0 0 0 0 0 0 0 0 0 0 0 0
37025 - 0 0 0 0 0 0 0 0 0 0 0 0
37026 - 0 0 0 0 0 0 0 0 0 0 0 0
37027 - 0 0 0 0 0 0 0 0 0 14 14 14
37028 - 42 42 42 86 86 86 6 6 6 116 116 116
37029 -106 106 106 6 6 6 70 70 70 149 149 149
37030 -128 128 128 18 18 18 38 38 38 54 54 54
37031 -221 221 221 106 106 106 2 2 6 14 14 14
37032 - 46 46 46 190 190 190 198 198 198 2 2 6
37033 - 2 2 6 2 2 6 2 2 6 2 2 6
37034 - 74 74 74 62 62 62 22 22 22 6 6 6
37035 - 0 0 0 0 0 0 0 0 0 0 0 0
37036 - 0 0 0 0 0 0 0 0 0 0 0 0
37037 - 0 0 0 0 0 0 0 0 0 0 0 0
37038 - 0 0 0 0 0 0 0 0 0 0 0 0
37039 - 0 0 0 0 0 0 0 0 0 0 0 0
37040 - 0 0 0 0 0 0 0 0 0 0 0 0
37041 - 0 0 0 0 0 0 0 0 1 0 0 0
37042 - 0 0 1 0 0 0 0 0 1 0 0 0
37043 - 0 0 0 0 0 0 0 0 0 0 0 0
37044 - 0 0 0 0 0 0 0 0 0 0 0 0
37045 - 0 0 0 0 0 0 0 0 0 0 0 0
37046 - 0 0 0 0 0 0 0 0 0 0 0 0
37047 - 0 0 0 0 0 0 0 0 0 14 14 14
37048 - 42 42 42 94 94 94 14 14 14 101 101 101
37049 -128 128 128 2 2 6 18 18 18 116 116 116
37050 -118 98 46 121 92 8 121 92 8 98 78 10
37051 -162 162 162 106 106 106 2 2 6 2 2 6
37052 - 2 2 6 195 195 195 195 195 195 6 6 6
37053 - 2 2 6 2 2 6 2 2 6 2 2 6
37054 - 74 74 74 62 62 62 22 22 22 6 6 6
37055 - 0 0 0 0 0 0 0 0 0 0 0 0
37056 - 0 0 0 0 0 0 0 0 0 0 0 0
37057 - 0 0 0 0 0 0 0 0 0 0 0 0
37058 - 0 0 0 0 0 0 0 0 0 0 0 0
37059 - 0 0 0 0 0 0 0 0 0 0 0 0
37060 - 0 0 0 0 0 0 0 0 0 0 0 0
37061 - 0 0 0 0 0 0 0 0 1 0 0 1
37062 - 0 0 1 0 0 0 0 0 1 0 0 0
37063 - 0 0 0 0 0 0 0 0 0 0 0 0
37064 - 0 0 0 0 0 0 0 0 0 0 0 0
37065 - 0 0 0 0 0 0 0 0 0 0 0 0
37066 - 0 0 0 0 0 0 0 0 0 0 0 0
37067 - 0 0 0 0 0 0 0 0 0 10 10 10
37068 - 38 38 38 90 90 90 14 14 14 58 58 58
37069 -210 210 210 26 26 26 54 38 6 154 114 10
37070 -226 170 11 236 186 11 225 175 15 184 144 12
37071 -215 174 15 175 146 61 37 26 9 2 2 6
37072 - 70 70 70 246 246 246 138 138 138 2 2 6
37073 - 2 2 6 2 2 6 2 2 6 2 2 6
37074 - 70 70 70 66 66 66 26 26 26 6 6 6
37075 - 0 0 0 0 0 0 0 0 0 0 0 0
37076 - 0 0 0 0 0 0 0 0 0 0 0 0
37077 - 0 0 0 0 0 0 0 0 0 0 0 0
37078 - 0 0 0 0 0 0 0 0 0 0 0 0
37079 - 0 0 0 0 0 0 0 0 0 0 0 0
37080 - 0 0 0 0 0 0 0 0 0 0 0 0
37081 - 0 0 0 0 0 0 0 0 0 0 0 0
37082 - 0 0 0 0 0 0 0 0 0 0 0 0
37083 - 0 0 0 0 0 0 0 0 0 0 0 0
37084 - 0 0 0 0 0 0 0 0 0 0 0 0
37085 - 0 0 0 0 0 0 0 0 0 0 0 0
37086 - 0 0 0 0 0 0 0 0 0 0 0 0
37087 - 0 0 0 0 0 0 0 0 0 10 10 10
37088 - 38 38 38 86 86 86 14 14 14 10 10 10
37089 -195 195 195 188 164 115 192 133 9 225 175 15
37090 -239 182 13 234 190 10 232 195 16 232 200 30
37091 -245 207 45 241 208 19 232 195 16 184 144 12
37092 -218 194 134 211 206 186 42 42 42 2 2 6
37093 - 2 2 6 2 2 6 2 2 6 2 2 6
37094 - 50 50 50 74 74 74 30 30 30 6 6 6
37095 - 0 0 0 0 0 0 0 0 0 0 0 0
37096 - 0 0 0 0 0 0 0 0 0 0 0 0
37097 - 0 0 0 0 0 0 0 0 0 0 0 0
37098 - 0 0 0 0 0 0 0 0 0 0 0 0
37099 - 0 0 0 0 0 0 0 0 0 0 0 0
37100 - 0 0 0 0 0 0 0 0 0 0 0 0
37101 - 0 0 0 0 0 0 0 0 0 0 0 0
37102 - 0 0 0 0 0 0 0 0 0 0 0 0
37103 - 0 0 0 0 0 0 0 0 0 0 0 0
37104 - 0 0 0 0 0 0 0 0 0 0 0 0
37105 - 0 0 0 0 0 0 0 0 0 0 0 0
37106 - 0 0 0 0 0 0 0 0 0 0 0 0
37107 - 0 0 0 0 0 0 0 0 0 10 10 10
37108 - 34 34 34 86 86 86 14 14 14 2 2 6
37109 -121 87 25 192 133 9 219 162 10 239 182 13
37110 -236 186 11 232 195 16 241 208 19 244 214 54
37111 -246 218 60 246 218 38 246 215 20 241 208 19
37112 -241 208 19 226 184 13 121 87 25 2 2 6
37113 - 2 2 6 2 2 6 2 2 6 2 2 6
37114 - 50 50 50 82 82 82 34 34 34 10 10 10
37115 - 0 0 0 0 0 0 0 0 0 0 0 0
37116 - 0 0 0 0 0 0 0 0 0 0 0 0
37117 - 0 0 0 0 0 0 0 0 0 0 0 0
37118 - 0 0 0 0 0 0 0 0 0 0 0 0
37119 - 0 0 0 0 0 0 0 0 0 0 0 0
37120 - 0 0 0 0 0 0 0 0 0 0 0 0
37121 - 0 0 0 0 0 0 0 0 0 0 0 0
37122 - 0 0 0 0 0 0 0 0 0 0 0 0
37123 - 0 0 0 0 0 0 0 0 0 0 0 0
37124 - 0 0 0 0 0 0 0 0 0 0 0 0
37125 - 0 0 0 0 0 0 0 0 0 0 0 0
37126 - 0 0 0 0 0 0 0 0 0 0 0 0
37127 - 0 0 0 0 0 0 0 0 0 10 10 10
37128 - 34 34 34 82 82 82 30 30 30 61 42 6
37129 -180 123 7 206 145 10 230 174 11 239 182 13
37130 -234 190 10 238 202 15 241 208 19 246 218 74
37131 -246 218 38 246 215 20 246 215 20 246 215 20
37132 -226 184 13 215 174 15 184 144 12 6 6 6
37133 - 2 2 6 2 2 6 2 2 6 2 2 6
37134 - 26 26 26 94 94 94 42 42 42 14 14 14
37135 - 0 0 0 0 0 0 0 0 0 0 0 0
37136 - 0 0 0 0 0 0 0 0 0 0 0 0
37137 - 0 0 0 0 0 0 0 0 0 0 0 0
37138 - 0 0 0 0 0 0 0 0 0 0 0 0
37139 - 0 0 0 0 0 0 0 0 0 0 0 0
37140 - 0 0 0 0 0 0 0 0 0 0 0 0
37141 - 0 0 0 0 0 0 0 0 0 0 0 0
37142 - 0 0 0 0 0 0 0 0 0 0 0 0
37143 - 0 0 0 0 0 0 0 0 0 0 0 0
37144 - 0 0 0 0 0 0 0 0 0 0 0 0
37145 - 0 0 0 0 0 0 0 0 0 0 0 0
37146 - 0 0 0 0 0 0 0 0 0 0 0 0
37147 - 0 0 0 0 0 0 0 0 0 10 10 10
37148 - 30 30 30 78 78 78 50 50 50 104 69 6
37149 -192 133 9 216 158 10 236 178 12 236 186 11
37150 -232 195 16 241 208 19 244 214 54 245 215 43
37151 -246 215 20 246 215 20 241 208 19 198 155 10
37152 -200 144 11 216 158 10 156 118 10 2 2 6
37153 - 2 2 6 2 2 6 2 2 6 2 2 6
37154 - 6 6 6 90 90 90 54 54 54 18 18 18
37155 - 6 6 6 0 0 0 0 0 0 0 0 0
37156 - 0 0 0 0 0 0 0 0 0 0 0 0
37157 - 0 0 0 0 0 0 0 0 0 0 0 0
37158 - 0 0 0 0 0 0 0 0 0 0 0 0
37159 - 0 0 0 0 0 0 0 0 0 0 0 0
37160 - 0 0 0 0 0 0 0 0 0 0 0 0
37161 - 0 0 0 0 0 0 0 0 0 0 0 0
37162 - 0 0 0 0 0 0 0 0 0 0 0 0
37163 - 0 0 0 0 0 0 0 0 0 0 0 0
37164 - 0 0 0 0 0 0 0 0 0 0 0 0
37165 - 0 0 0 0 0 0 0 0 0 0 0 0
37166 - 0 0 0 0 0 0 0 0 0 0 0 0
37167 - 0 0 0 0 0 0 0 0 0 10 10 10
37168 - 30 30 30 78 78 78 46 46 46 22 22 22
37169 -137 92 6 210 162 10 239 182 13 238 190 10
37170 -238 202 15 241 208 19 246 215 20 246 215 20
37171 -241 208 19 203 166 17 185 133 11 210 150 10
37172 -216 158 10 210 150 10 102 78 10 2 2 6
37173 - 6 6 6 54 54 54 14 14 14 2 2 6
37174 - 2 2 6 62 62 62 74 74 74 30 30 30
37175 - 10 10 10 0 0 0 0 0 0 0 0 0
37176 - 0 0 0 0 0 0 0 0 0 0 0 0
37177 - 0 0 0 0 0 0 0 0 0 0 0 0
37178 - 0 0 0 0 0 0 0 0 0 0 0 0
37179 - 0 0 0 0 0 0 0 0 0 0 0 0
37180 - 0 0 0 0 0 0 0 0 0 0 0 0
37181 - 0 0 0 0 0 0 0 0 0 0 0 0
37182 - 0 0 0 0 0 0 0 0 0 0 0 0
37183 - 0 0 0 0 0 0 0 0 0 0 0 0
37184 - 0 0 0 0 0 0 0 0 0 0 0 0
37185 - 0 0 0 0 0 0 0 0 0 0 0 0
37186 - 0 0 0 0 0 0 0 0 0 0 0 0
37187 - 0 0 0 0 0 0 0 0 0 10 10 10
37188 - 34 34 34 78 78 78 50 50 50 6 6 6
37189 - 94 70 30 139 102 15 190 146 13 226 184 13
37190 -232 200 30 232 195 16 215 174 15 190 146 13
37191 -168 122 10 192 133 9 210 150 10 213 154 11
37192 -202 150 34 182 157 106 101 98 89 2 2 6
37193 - 2 2 6 78 78 78 116 116 116 58 58 58
37194 - 2 2 6 22 22 22 90 90 90 46 46 46
37195 - 18 18 18 6 6 6 0 0 0 0 0 0
37196 - 0 0 0 0 0 0 0 0 0 0 0 0
37197 - 0 0 0 0 0 0 0 0 0 0 0 0
37198 - 0 0 0 0 0 0 0 0 0 0 0 0
37199 - 0 0 0 0 0 0 0 0 0 0 0 0
37200 - 0 0 0 0 0 0 0 0 0 0 0 0
37201 - 0 0 0 0 0 0 0 0 0 0 0 0
37202 - 0 0 0 0 0 0 0 0 0 0 0 0
37203 - 0 0 0 0 0 0 0 0 0 0 0 0
37204 - 0 0 0 0 0 0 0 0 0 0 0 0
37205 - 0 0 0 0 0 0 0 0 0 0 0 0
37206 - 0 0 0 0 0 0 0 0 0 0 0 0
37207 - 0 0 0 0 0 0 0 0 0 10 10 10
37208 - 38 38 38 86 86 86 50 50 50 6 6 6
37209 -128 128 128 174 154 114 156 107 11 168 122 10
37210 -198 155 10 184 144 12 197 138 11 200 144 11
37211 -206 145 10 206 145 10 197 138 11 188 164 115
37212 -195 195 195 198 198 198 174 174 174 14 14 14
37213 - 2 2 6 22 22 22 116 116 116 116 116 116
37214 - 22 22 22 2 2 6 74 74 74 70 70 70
37215 - 30 30 30 10 10 10 0 0 0 0 0 0
37216 - 0 0 0 0 0 0 0 0 0 0 0 0
37217 - 0 0 0 0 0 0 0 0 0 0 0 0
37218 - 0 0 0 0 0 0 0 0 0 0 0 0
37219 - 0 0 0 0 0 0 0 0 0 0 0 0
37220 - 0 0 0 0 0 0 0 0 0 0 0 0
37221 - 0 0 0 0 0 0 0 0 0 0 0 0
37222 - 0 0 0 0 0 0 0 0 0 0 0 0
37223 - 0 0 0 0 0 0 0 0 0 0 0 0
37224 - 0 0 0 0 0 0 0 0 0 0 0 0
37225 - 0 0 0 0 0 0 0 0 0 0 0 0
37226 - 0 0 0 0 0 0 0 0 0 0 0 0
37227 - 0 0 0 0 0 0 6 6 6 18 18 18
37228 - 50 50 50 101 101 101 26 26 26 10 10 10
37229 -138 138 138 190 190 190 174 154 114 156 107 11
37230 -197 138 11 200 144 11 197 138 11 192 133 9
37231 -180 123 7 190 142 34 190 178 144 187 187 187
37232 -202 202 202 221 221 221 214 214 214 66 66 66
37233 - 2 2 6 2 2 6 50 50 50 62 62 62
37234 - 6 6 6 2 2 6 10 10 10 90 90 90
37235 - 50 50 50 18 18 18 6 6 6 0 0 0
37236 - 0 0 0 0 0 0 0 0 0 0 0 0
37237 - 0 0 0 0 0 0 0 0 0 0 0 0
37238 - 0 0 0 0 0 0 0 0 0 0 0 0
37239 - 0 0 0 0 0 0 0 0 0 0 0 0
37240 - 0 0 0 0 0 0 0 0 0 0 0 0
37241 - 0 0 0 0 0 0 0 0 0 0 0 0
37242 - 0 0 0 0 0 0 0 0 0 0 0 0
37243 - 0 0 0 0 0 0 0 0 0 0 0 0
37244 - 0 0 0 0 0 0 0 0 0 0 0 0
37245 - 0 0 0 0 0 0 0 0 0 0 0 0
37246 - 0 0 0 0 0 0 0 0 0 0 0 0
37247 - 0 0 0 0 0 0 10 10 10 34 34 34
37248 - 74 74 74 74 74 74 2 2 6 6 6 6
37249 -144 144 144 198 198 198 190 190 190 178 166 146
37250 -154 121 60 156 107 11 156 107 11 168 124 44
37251 -174 154 114 187 187 187 190 190 190 210 210 210
37252 -246 246 246 253 253 253 253 253 253 182 182 182
37253 - 6 6 6 2 2 6 2 2 6 2 2 6
37254 - 2 2 6 2 2 6 2 2 6 62 62 62
37255 - 74 74 74 34 34 34 14 14 14 0 0 0
37256 - 0 0 0 0 0 0 0 0 0 0 0 0
37257 - 0 0 0 0 0 0 0 0 0 0 0 0
37258 - 0 0 0 0 0 0 0 0 0 0 0 0
37259 - 0 0 0 0 0 0 0 0 0 0 0 0
37260 - 0 0 0 0 0 0 0 0 0 0 0 0
37261 - 0 0 0 0 0 0 0 0 0 0 0 0
37262 - 0 0 0 0 0 0 0 0 0 0 0 0
37263 - 0 0 0 0 0 0 0 0 0 0 0 0
37264 - 0 0 0 0 0 0 0 0 0 0 0 0
37265 - 0 0 0 0 0 0 0 0 0 0 0 0
37266 - 0 0 0 0 0 0 0 0 0 0 0 0
37267 - 0 0 0 10 10 10 22 22 22 54 54 54
37268 - 94 94 94 18 18 18 2 2 6 46 46 46
37269 -234 234 234 221 221 221 190 190 190 190 190 190
37270 -190 190 190 187 187 187 187 187 187 190 190 190
37271 -190 190 190 195 195 195 214 214 214 242 242 242
37272 -253 253 253 253 253 253 253 253 253 253 253 253
37273 - 82 82 82 2 2 6 2 2 6 2 2 6
37274 - 2 2 6 2 2 6 2 2 6 14 14 14
37275 - 86 86 86 54 54 54 22 22 22 6 6 6
37276 - 0 0 0 0 0 0 0 0 0 0 0 0
37277 - 0 0 0 0 0 0 0 0 0 0 0 0
37278 - 0 0 0 0 0 0 0 0 0 0 0 0
37279 - 0 0 0 0 0 0 0 0 0 0 0 0
37280 - 0 0 0 0 0 0 0 0 0 0 0 0
37281 - 0 0 0 0 0 0 0 0 0 0 0 0
37282 - 0 0 0 0 0 0 0 0 0 0 0 0
37283 - 0 0 0 0 0 0 0 0 0 0 0 0
37284 - 0 0 0 0 0 0 0 0 0 0 0 0
37285 - 0 0 0 0 0 0 0 0 0 0 0 0
37286 - 0 0 0 0 0 0 0 0 0 0 0 0
37287 - 6 6 6 18 18 18 46 46 46 90 90 90
37288 - 46 46 46 18 18 18 6 6 6 182 182 182
37289 -253 253 253 246 246 246 206 206 206 190 190 190
37290 -190 190 190 190 190 190 190 190 190 190 190 190
37291 -206 206 206 231 231 231 250 250 250 253 253 253
37292 -253 253 253 253 253 253 253 253 253 253 253 253
37293 -202 202 202 14 14 14 2 2 6 2 2 6
37294 - 2 2 6 2 2 6 2 2 6 2 2 6
37295 - 42 42 42 86 86 86 42 42 42 18 18 18
37296 - 6 6 6 0 0 0 0 0 0 0 0 0
37297 - 0 0 0 0 0 0 0 0 0 0 0 0
37298 - 0 0 0 0 0 0 0 0 0 0 0 0
37299 - 0 0 0 0 0 0 0 0 0 0 0 0
37300 - 0 0 0 0 0 0 0 0 0 0 0 0
37301 - 0 0 0 0 0 0 0 0 0 0 0 0
37302 - 0 0 0 0 0 0 0 0 0 0 0 0
37303 - 0 0 0 0 0 0 0 0 0 0 0 0
37304 - 0 0 0 0 0 0 0 0 0 0 0 0
37305 - 0 0 0 0 0 0 0 0 0 0 0 0
37306 - 0 0 0 0 0 0 0 0 0 6 6 6
37307 - 14 14 14 38 38 38 74 74 74 66 66 66
37308 - 2 2 6 6 6 6 90 90 90 250 250 250
37309 -253 253 253 253 253 253 238 238 238 198 198 198
37310 -190 190 190 190 190 190 195 195 195 221 221 221
37311 -246 246 246 253 253 253 253 253 253 253 253 253
37312 -253 253 253 253 253 253 253 253 253 253 253 253
37313 -253 253 253 82 82 82 2 2 6 2 2 6
37314 - 2 2 6 2 2 6 2 2 6 2 2 6
37315 - 2 2 6 78 78 78 70 70 70 34 34 34
37316 - 14 14 14 6 6 6 0 0 0 0 0 0
37317 - 0 0 0 0 0 0 0 0 0 0 0 0
37318 - 0 0 0 0 0 0 0 0 0 0 0 0
37319 - 0 0 0 0 0 0 0 0 0 0 0 0
37320 - 0 0 0 0 0 0 0 0 0 0 0 0
37321 - 0 0 0 0 0 0 0 0 0 0 0 0
37322 - 0 0 0 0 0 0 0 0 0 0 0 0
37323 - 0 0 0 0 0 0 0 0 0 0 0 0
37324 - 0 0 0 0 0 0 0 0 0 0 0 0
37325 - 0 0 0 0 0 0 0 0 0 0 0 0
37326 - 0 0 0 0 0 0 0 0 0 14 14 14
37327 - 34 34 34 66 66 66 78 78 78 6 6 6
37328 - 2 2 6 18 18 18 218 218 218 253 253 253
37329 -253 253 253 253 253 253 253 253 253 246 246 246
37330 -226 226 226 231 231 231 246 246 246 253 253 253
37331 -253 253 253 253 253 253 253 253 253 253 253 253
37332 -253 253 253 253 253 253 253 253 253 253 253 253
37333 -253 253 253 178 178 178 2 2 6 2 2 6
37334 - 2 2 6 2 2 6 2 2 6 2 2 6
37335 - 2 2 6 18 18 18 90 90 90 62 62 62
37336 - 30 30 30 10 10 10 0 0 0 0 0 0
37337 - 0 0 0 0 0 0 0 0 0 0 0 0
37338 - 0 0 0 0 0 0 0 0 0 0 0 0
37339 - 0 0 0 0 0 0 0 0 0 0 0 0
37340 - 0 0 0 0 0 0 0 0 0 0 0 0
37341 - 0 0 0 0 0 0 0 0 0 0 0 0
37342 - 0 0 0 0 0 0 0 0 0 0 0 0
37343 - 0 0 0 0 0 0 0 0 0 0 0 0
37344 - 0 0 0 0 0 0 0 0 0 0 0 0
37345 - 0 0 0 0 0 0 0 0 0 0 0 0
37346 - 0 0 0 0 0 0 10 10 10 26 26 26
37347 - 58 58 58 90 90 90 18 18 18 2 2 6
37348 - 2 2 6 110 110 110 253 253 253 253 253 253
37349 -253 253 253 253 253 253 253 253 253 253 253 253
37350 -250 250 250 253 253 253 253 253 253 253 253 253
37351 -253 253 253 253 253 253 253 253 253 253 253 253
37352 -253 253 253 253 253 253 253 253 253 253 253 253
37353 -253 253 253 231 231 231 18 18 18 2 2 6
37354 - 2 2 6 2 2 6 2 2 6 2 2 6
37355 - 2 2 6 2 2 6 18 18 18 94 94 94
37356 - 54 54 54 26 26 26 10 10 10 0 0 0
37357 - 0 0 0 0 0 0 0 0 0 0 0 0
37358 - 0 0 0 0 0 0 0 0 0 0 0 0
37359 - 0 0 0 0 0 0 0 0 0 0 0 0
37360 - 0 0 0 0 0 0 0 0 0 0 0 0
37361 - 0 0 0 0 0 0 0 0 0 0 0 0
37362 - 0 0 0 0 0 0 0 0 0 0 0 0
37363 - 0 0 0 0 0 0 0 0 0 0 0 0
37364 - 0 0 0 0 0 0 0 0 0 0 0 0
37365 - 0 0 0 0 0 0 0 0 0 0 0 0
37366 - 0 0 0 6 6 6 22 22 22 50 50 50
37367 - 90 90 90 26 26 26 2 2 6 2 2 6
37368 - 14 14 14 195 195 195 250 250 250 253 253 253
37369 -253 253 253 253 253 253 253 253 253 253 253 253
37370 -253 253 253 253 253 253 253 253 253 253 253 253
37371 -253 253 253 253 253 253 253 253 253 253 253 253
37372 -253 253 253 253 253 253 253 253 253 253 253 253
37373 -250 250 250 242 242 242 54 54 54 2 2 6
37374 - 2 2 6 2 2 6 2 2 6 2 2 6
37375 - 2 2 6 2 2 6 2 2 6 38 38 38
37376 - 86 86 86 50 50 50 22 22 22 6 6 6
37377 - 0 0 0 0 0 0 0 0 0 0 0 0
37378 - 0 0 0 0 0 0 0 0 0 0 0 0
37379 - 0 0 0 0 0 0 0 0 0 0 0 0
37380 - 0 0 0 0 0 0 0 0 0 0 0 0
37381 - 0 0 0 0 0 0 0 0 0 0 0 0
37382 - 0 0 0 0 0 0 0 0 0 0 0 0
37383 - 0 0 0 0 0 0 0 0 0 0 0 0
37384 - 0 0 0 0 0 0 0 0 0 0 0 0
37385 - 0 0 0 0 0 0 0 0 0 0 0 0
37386 - 6 6 6 14 14 14 38 38 38 82 82 82
37387 - 34 34 34 2 2 6 2 2 6 2 2 6
37388 - 42 42 42 195 195 195 246 246 246 253 253 253
37389 -253 253 253 253 253 253 253 253 253 250 250 250
37390 -242 242 242 242 242 242 250 250 250 253 253 253
37391 -253 253 253 253 253 253 253 253 253 253 253 253
37392 -253 253 253 250 250 250 246 246 246 238 238 238
37393 -226 226 226 231 231 231 101 101 101 6 6 6
37394 - 2 2 6 2 2 6 2 2 6 2 2 6
37395 - 2 2 6 2 2 6 2 2 6 2 2 6
37396 - 38 38 38 82 82 82 42 42 42 14 14 14
37397 - 6 6 6 0 0 0 0 0 0 0 0 0
37398 - 0 0 0 0 0 0 0 0 0 0 0 0
37399 - 0 0 0 0 0 0 0 0 0 0 0 0
37400 - 0 0 0 0 0 0 0 0 0 0 0 0
37401 - 0 0 0 0 0 0 0 0 0 0 0 0
37402 - 0 0 0 0 0 0 0 0 0 0 0 0
37403 - 0 0 0 0 0 0 0 0 0 0 0 0
37404 - 0 0 0 0 0 0 0 0 0 0 0 0
37405 - 0 0 0 0 0 0 0 0 0 0 0 0
37406 - 10 10 10 26 26 26 62 62 62 66 66 66
37407 - 2 2 6 2 2 6 2 2 6 6 6 6
37408 - 70 70 70 170 170 170 206 206 206 234 234 234
37409 -246 246 246 250 250 250 250 250 250 238 238 238
37410 -226 226 226 231 231 231 238 238 238 250 250 250
37411 -250 250 250 250 250 250 246 246 246 231 231 231
37412 -214 214 214 206 206 206 202 202 202 202 202 202
37413 -198 198 198 202 202 202 182 182 182 18 18 18
37414 - 2 2 6 2 2 6 2 2 6 2 2 6
37415 - 2 2 6 2 2 6 2 2 6 2 2 6
37416 - 2 2 6 62 62 62 66 66 66 30 30 30
37417 - 10 10 10 0 0 0 0 0 0 0 0 0
37418 - 0 0 0 0 0 0 0 0 0 0 0 0
37419 - 0 0 0 0 0 0 0 0 0 0 0 0
37420 - 0 0 0 0 0 0 0 0 0 0 0 0
37421 - 0 0 0 0 0 0 0 0 0 0 0 0
37422 - 0 0 0 0 0 0 0 0 0 0 0 0
37423 - 0 0 0 0 0 0 0 0 0 0 0 0
37424 - 0 0 0 0 0 0 0 0 0 0 0 0
37425 - 0 0 0 0 0 0 0 0 0 0 0 0
37426 - 14 14 14 42 42 42 82 82 82 18 18 18
37427 - 2 2 6 2 2 6 2 2 6 10 10 10
37428 - 94 94 94 182 182 182 218 218 218 242 242 242
37429 -250 250 250 253 253 253 253 253 253 250 250 250
37430 -234 234 234 253 253 253 253 253 253 253 253 253
37431 -253 253 253 253 253 253 253 253 253 246 246 246
37432 -238 238 238 226 226 226 210 210 210 202 202 202
37433 -195 195 195 195 195 195 210 210 210 158 158 158
37434 - 6 6 6 14 14 14 50 50 50 14 14 14
37435 - 2 2 6 2 2 6 2 2 6 2 2 6
37436 - 2 2 6 6 6 6 86 86 86 46 46 46
37437 - 18 18 18 6 6 6 0 0 0 0 0 0
37438 - 0 0 0 0 0 0 0 0 0 0 0 0
37439 - 0 0 0 0 0 0 0 0 0 0 0 0
37440 - 0 0 0 0 0 0 0 0 0 0 0 0
37441 - 0 0 0 0 0 0 0 0 0 0 0 0
37442 - 0 0 0 0 0 0 0 0 0 0 0 0
37443 - 0 0 0 0 0 0 0 0 0 0 0 0
37444 - 0 0 0 0 0 0 0 0 0 0 0 0
37445 - 0 0 0 0 0 0 0 0 0 6 6 6
37446 - 22 22 22 54 54 54 70 70 70 2 2 6
37447 - 2 2 6 10 10 10 2 2 6 22 22 22
37448 -166 166 166 231 231 231 250 250 250 253 253 253
37449 -253 253 253 253 253 253 253 253 253 250 250 250
37450 -242 242 242 253 253 253 253 253 253 253 253 253
37451 -253 253 253 253 253 253 253 253 253 253 253 253
37452 -253 253 253 253 253 253 253 253 253 246 246 246
37453 -231 231 231 206 206 206 198 198 198 226 226 226
37454 - 94 94 94 2 2 6 6 6 6 38 38 38
37455 - 30 30 30 2 2 6 2 2 6 2 2 6
37456 - 2 2 6 2 2 6 62 62 62 66 66 66
37457 - 26 26 26 10 10 10 0 0 0 0 0 0
37458 - 0 0 0 0 0 0 0 0 0 0 0 0
37459 - 0 0 0 0 0 0 0 0 0 0 0 0
37460 - 0 0 0 0 0 0 0 0 0 0 0 0
37461 - 0 0 0 0 0 0 0 0 0 0 0 0
37462 - 0 0 0 0 0 0 0 0 0 0 0 0
37463 - 0 0 0 0 0 0 0 0 0 0 0 0
37464 - 0 0 0 0 0 0 0 0 0 0 0 0
37465 - 0 0 0 0 0 0 0 0 0 10 10 10
37466 - 30 30 30 74 74 74 50 50 50 2 2 6
37467 - 26 26 26 26 26 26 2 2 6 106 106 106
37468 -238 238 238 253 253 253 253 253 253 253 253 253
37469 -253 253 253 253 253 253 253 253 253 253 253 253
37470 -253 253 253 253 253 253 253 253 253 253 253 253
37471 -253 253 253 253 253 253 253 253 253 253 253 253
37472 -253 253 253 253 253 253 253 253 253 253 253 253
37473 -253 253 253 246 246 246 218 218 218 202 202 202
37474 -210 210 210 14 14 14 2 2 6 2 2 6
37475 - 30 30 30 22 22 22 2 2 6 2 2 6
37476 - 2 2 6 2 2 6 18 18 18 86 86 86
37477 - 42 42 42 14 14 14 0 0 0 0 0 0
37478 - 0 0 0 0 0 0 0 0 0 0 0 0
37479 - 0 0 0 0 0 0 0 0 0 0 0 0
37480 - 0 0 0 0 0 0 0 0 0 0 0 0
37481 - 0 0 0 0 0 0 0 0 0 0 0 0
37482 - 0 0 0 0 0 0 0 0 0 0 0 0
37483 - 0 0 0 0 0 0 0 0 0 0 0 0
37484 - 0 0 0 0 0 0 0 0 0 0 0 0
37485 - 0 0 0 0 0 0 0 0 0 14 14 14
37486 - 42 42 42 90 90 90 22 22 22 2 2 6
37487 - 42 42 42 2 2 6 18 18 18 218 218 218
37488 -253 253 253 253 253 253 253 253 253 253 253 253
37489 -253 253 253 253 253 253 253 253 253 253 253 253
37490 -253 253 253 253 253 253 253 253 253 253 253 253
37491 -253 253 253 253 253 253 253 253 253 253 253 253
37492 -253 253 253 253 253 253 253 253 253 253 253 253
37493 -253 253 253 253 253 253 250 250 250 221 221 221
37494 -218 218 218 101 101 101 2 2 6 14 14 14
37495 - 18 18 18 38 38 38 10 10 10 2 2 6
37496 - 2 2 6 2 2 6 2 2 6 78 78 78
37497 - 58 58 58 22 22 22 6 6 6 0 0 0
37498 - 0 0 0 0 0 0 0 0 0 0 0 0
37499 - 0 0 0 0 0 0 0 0 0 0 0 0
37500 - 0 0 0 0 0 0 0 0 0 0 0 0
37501 - 0 0 0 0 0 0 0 0 0 0 0 0
37502 - 0 0 0 0 0 0 0 0 0 0 0 0
37503 - 0 0 0 0 0 0 0 0 0 0 0 0
37504 - 0 0 0 0 0 0 0 0 0 0 0 0
37505 - 0 0 0 0 0 0 6 6 6 18 18 18
37506 - 54 54 54 82 82 82 2 2 6 26 26 26
37507 - 22 22 22 2 2 6 123 123 123 253 253 253
37508 -253 253 253 253 253 253 253 253 253 253 253 253
37509 -253 253 253 253 253 253 253 253 253 253 253 253
37510 -253 253 253 253 253 253 253 253 253 253 253 253
37511 -253 253 253 253 253 253 253 253 253 253 253 253
37512 -253 253 253 253 253 253 253 253 253 253 253 253
37513 -253 253 253 253 253 253 253 253 253 250 250 250
37514 -238 238 238 198 198 198 6 6 6 38 38 38
37515 - 58 58 58 26 26 26 38 38 38 2 2 6
37516 - 2 2 6 2 2 6 2 2 6 46 46 46
37517 - 78 78 78 30 30 30 10 10 10 0 0 0
37518 - 0 0 0 0 0 0 0 0 0 0 0 0
37519 - 0 0 0 0 0 0 0 0 0 0 0 0
37520 - 0 0 0 0 0 0 0 0 0 0 0 0
37521 - 0 0 0 0 0 0 0 0 0 0 0 0
37522 - 0 0 0 0 0 0 0 0 0 0 0 0
37523 - 0 0 0 0 0 0 0 0 0 0 0 0
37524 - 0 0 0 0 0 0 0 0 0 0 0 0
37525 - 0 0 0 0 0 0 10 10 10 30 30 30
37526 - 74 74 74 58 58 58 2 2 6 42 42 42
37527 - 2 2 6 22 22 22 231 231 231 253 253 253
37528 -253 253 253 253 253 253 253 253 253 253 253 253
37529 -253 253 253 253 253 253 253 253 253 250 250 250
37530 -253 253 253 253 253 253 253 253 253 253 253 253
37531 -253 253 253 253 253 253 253 253 253 253 253 253
37532 -253 253 253 253 253 253 253 253 253 253 253 253
37533 -253 253 253 253 253 253 253 253 253 253 253 253
37534 -253 253 253 246 246 246 46 46 46 38 38 38
37535 - 42 42 42 14 14 14 38 38 38 14 14 14
37536 - 2 2 6 2 2 6 2 2 6 6 6 6
37537 - 86 86 86 46 46 46 14 14 14 0 0 0
37538 - 0 0 0 0 0 0 0 0 0 0 0 0
37539 - 0 0 0 0 0 0 0 0 0 0 0 0
37540 - 0 0 0 0 0 0 0 0 0 0 0 0
37541 - 0 0 0 0 0 0 0 0 0 0 0 0
37542 - 0 0 0 0 0 0 0 0 0 0 0 0
37543 - 0 0 0 0 0 0 0 0 0 0 0 0
37544 - 0 0 0 0 0 0 0 0 0 0 0 0
37545 - 0 0 0 6 6 6 14 14 14 42 42 42
37546 - 90 90 90 18 18 18 18 18 18 26 26 26
37547 - 2 2 6 116 116 116 253 253 253 253 253 253
37548 -253 253 253 253 253 253 253 253 253 253 253 253
37549 -253 253 253 253 253 253 250 250 250 238 238 238
37550 -253 253 253 253 253 253 253 253 253 253 253 253
37551 -253 253 253 253 253 253 253 253 253 253 253 253
37552 -253 253 253 253 253 253 253 253 253 253 253 253
37553 -253 253 253 253 253 253 253 253 253 253 253 253
37554 -253 253 253 253 253 253 94 94 94 6 6 6
37555 - 2 2 6 2 2 6 10 10 10 34 34 34
37556 - 2 2 6 2 2 6 2 2 6 2 2 6
37557 - 74 74 74 58 58 58 22 22 22 6 6 6
37558 - 0 0 0 0 0 0 0 0 0 0 0 0
37559 - 0 0 0 0 0 0 0 0 0 0 0 0
37560 - 0 0 0 0 0 0 0 0 0 0 0 0
37561 - 0 0 0 0 0 0 0 0 0 0 0 0
37562 - 0 0 0 0 0 0 0 0 0 0 0 0
37563 - 0 0 0 0 0 0 0 0 0 0 0 0
37564 - 0 0 0 0 0 0 0 0 0 0 0 0
37565 - 0 0 0 10 10 10 26 26 26 66 66 66
37566 - 82 82 82 2 2 6 38 38 38 6 6 6
37567 - 14 14 14 210 210 210 253 253 253 253 253 253
37568 -253 253 253 253 253 253 253 253 253 253 253 253
37569 -253 253 253 253 253 253 246 246 246 242 242 242
37570 -253 253 253 253 253 253 253 253 253 253 253 253
37571 -253 253 253 253 253 253 253 253 253 253 253 253
37572 -253 253 253 253 253 253 253 253 253 253 253 253
37573 -253 253 253 253 253 253 253 253 253 253 253 253
37574 -253 253 253 253 253 253 144 144 144 2 2 6
37575 - 2 2 6 2 2 6 2 2 6 46 46 46
37576 - 2 2 6 2 2 6 2 2 6 2 2 6
37577 - 42 42 42 74 74 74 30 30 30 10 10 10
37578 - 0 0 0 0 0 0 0 0 0 0 0 0
37579 - 0 0 0 0 0 0 0 0 0 0 0 0
37580 - 0 0 0 0 0 0 0 0 0 0 0 0
37581 - 0 0 0 0 0 0 0 0 0 0 0 0
37582 - 0 0 0 0 0 0 0 0 0 0 0 0
37583 - 0 0 0 0 0 0 0 0 0 0 0 0
37584 - 0 0 0 0 0 0 0 0 0 0 0 0
37585 - 6 6 6 14 14 14 42 42 42 90 90 90
37586 - 26 26 26 6 6 6 42 42 42 2 2 6
37587 - 74 74 74 250 250 250 253 253 253 253 253 253
37588 -253 253 253 253 253 253 253 253 253 253 253 253
37589 -253 253 253 253 253 253 242 242 242 242 242 242
37590 -253 253 253 253 253 253 253 253 253 253 253 253
37591 -253 253 253 253 253 253 253 253 253 253 253 253
37592 -253 253 253 253 253 253 253 253 253 253 253 253
37593 -253 253 253 253 253 253 253 253 253 253 253 253
37594 -253 253 253 253 253 253 182 182 182 2 2 6
37595 - 2 2 6 2 2 6 2 2 6 46 46 46
37596 - 2 2 6 2 2 6 2 2 6 2 2 6
37597 - 10 10 10 86 86 86 38 38 38 10 10 10
37598 - 0 0 0 0 0 0 0 0 0 0 0 0
37599 - 0 0 0 0 0 0 0 0 0 0 0 0
37600 - 0 0 0 0 0 0 0 0 0 0 0 0
37601 - 0 0 0 0 0 0 0 0 0 0 0 0
37602 - 0 0 0 0 0 0 0 0 0 0 0 0
37603 - 0 0 0 0 0 0 0 0 0 0 0 0
37604 - 0 0 0 0 0 0 0 0 0 0 0 0
37605 - 10 10 10 26 26 26 66 66 66 82 82 82
37606 - 2 2 6 22 22 22 18 18 18 2 2 6
37607 -149 149 149 253 253 253 253 253 253 253 253 253
37608 -253 253 253 253 253 253 253 253 253 253 253 253
37609 -253 253 253 253 253 253 234 234 234 242 242 242
37610 -253 253 253 253 253 253 253 253 253 253 253 253
37611 -253 253 253 253 253 253 253 253 253 253 253 253
37612 -253 253 253 253 253 253 253 253 253 253 253 253
37613 -253 253 253 253 253 253 253 253 253 253 253 253
37614 -253 253 253 253 253 253 206 206 206 2 2 6
37615 - 2 2 6 2 2 6 2 2 6 38 38 38
37616 - 2 2 6 2 2 6 2 2 6 2 2 6
37617 - 6 6 6 86 86 86 46 46 46 14 14 14
37618 - 0 0 0 0 0 0 0 0 0 0 0 0
37619 - 0 0 0 0 0 0 0 0 0 0 0 0
37620 - 0 0 0 0 0 0 0 0 0 0 0 0
37621 - 0 0 0 0 0 0 0 0 0 0 0 0
37622 - 0 0 0 0 0 0 0 0 0 0 0 0
37623 - 0 0 0 0 0 0 0 0 0 0 0 0
37624 - 0 0 0 0 0 0 0 0 0 6 6 6
37625 - 18 18 18 46 46 46 86 86 86 18 18 18
37626 - 2 2 6 34 34 34 10 10 10 6 6 6
37627 -210 210 210 253 253 253 253 253 253 253 253 253
37628 -253 253 253 253 253 253 253 253 253 253 253 253
37629 -253 253 253 253 253 253 234 234 234 242 242 242
37630 -253 253 253 253 253 253 253 253 253 253 253 253
37631 -253 253 253 253 253 253 253 253 253 253 253 253
37632 -253 253 253 253 253 253 253 253 253 253 253 253
37633 -253 253 253 253 253 253 253 253 253 253 253 253
37634 -253 253 253 253 253 253 221 221 221 6 6 6
37635 - 2 2 6 2 2 6 6 6 6 30 30 30
37636 - 2 2 6 2 2 6 2 2 6 2 2 6
37637 - 2 2 6 82 82 82 54 54 54 18 18 18
37638 - 6 6 6 0 0 0 0 0 0 0 0 0
37639 - 0 0 0 0 0 0 0 0 0 0 0 0
37640 - 0 0 0 0 0 0 0 0 0 0 0 0
37641 - 0 0 0 0 0 0 0 0 0 0 0 0
37642 - 0 0 0 0 0 0 0 0 0 0 0 0
37643 - 0 0 0 0 0 0 0 0 0 0 0 0
37644 - 0 0 0 0 0 0 0 0 0 10 10 10
37645 - 26 26 26 66 66 66 62 62 62 2 2 6
37646 - 2 2 6 38 38 38 10 10 10 26 26 26
37647 -238 238 238 253 253 253 253 253 253 253 253 253
37648 -253 253 253 253 253 253 253 253 253 253 253 253
37649 -253 253 253 253 253 253 231 231 231 238 238 238
37650 -253 253 253 253 253 253 253 253 253 253 253 253
37651 -253 253 253 253 253 253 253 253 253 253 253 253
37652 -253 253 253 253 253 253 253 253 253 253 253 253
37653 -253 253 253 253 253 253 253 253 253 253 253 253
37654 -253 253 253 253 253 253 231 231 231 6 6 6
37655 - 2 2 6 2 2 6 10 10 10 30 30 30
37656 - 2 2 6 2 2 6 2 2 6 2 2 6
37657 - 2 2 6 66 66 66 58 58 58 22 22 22
37658 - 6 6 6 0 0 0 0 0 0 0 0 0
37659 - 0 0 0 0 0 0 0 0 0 0 0 0
37660 - 0 0 0 0 0 0 0 0 0 0 0 0
37661 - 0 0 0 0 0 0 0 0 0 0 0 0
37662 - 0 0 0 0 0 0 0 0 0 0 0 0
37663 - 0 0 0 0 0 0 0 0 0 0 0 0
37664 - 0 0 0 0 0 0 0 0 0 10 10 10
37665 - 38 38 38 78 78 78 6 6 6 2 2 6
37666 - 2 2 6 46 46 46 14 14 14 42 42 42
37667 -246 246 246 253 253 253 253 253 253 253 253 253
37668 -253 253 253 253 253 253 253 253 253 253 253 253
37669 -253 253 253 253 253 253 231 231 231 242 242 242
37670 -253 253 253 253 253 253 253 253 253 253 253 253
37671 -253 253 253 253 253 253 253 253 253 253 253 253
37672 -253 253 253 253 253 253 253 253 253 253 253 253
37673 -253 253 253 253 253 253 253 253 253 253 253 253
37674 -253 253 253 253 253 253 234 234 234 10 10 10
37675 - 2 2 6 2 2 6 22 22 22 14 14 14
37676 - 2 2 6 2 2 6 2 2 6 2 2 6
37677 - 2 2 6 66 66 66 62 62 62 22 22 22
37678 - 6 6 6 0 0 0 0 0 0 0 0 0
37679 - 0 0 0 0 0 0 0 0 0 0 0 0
37680 - 0 0 0 0 0 0 0 0 0 0 0 0
37681 - 0 0 0 0 0 0 0 0 0 0 0 0
37682 - 0 0 0 0 0 0 0 0 0 0 0 0
37683 - 0 0 0 0 0 0 0 0 0 0 0 0
37684 - 0 0 0 0 0 0 6 6 6 18 18 18
37685 - 50 50 50 74 74 74 2 2 6 2 2 6
37686 - 14 14 14 70 70 70 34 34 34 62 62 62
37687 -250 250 250 253 253 253 253 253 253 253 253 253
37688 -253 253 253 253 253 253 253 253 253 253 253 253
37689 -253 253 253 253 253 253 231 231 231 246 246 246
37690 -253 253 253 253 253 253 253 253 253 253 253 253
37691 -253 253 253 253 253 253 253 253 253 253 253 253
37692 -253 253 253 253 253 253 253 253 253 253 253 253
37693 -253 253 253 253 253 253 253 253 253 253 253 253
37694 -253 253 253 253 253 253 234 234 234 14 14 14
37695 - 2 2 6 2 2 6 30 30 30 2 2 6
37696 - 2 2 6 2 2 6 2 2 6 2 2 6
37697 - 2 2 6 66 66 66 62 62 62 22 22 22
37698 - 6 6 6 0 0 0 0 0 0 0 0 0
37699 - 0 0 0 0 0 0 0 0 0 0 0 0
37700 - 0 0 0 0 0 0 0 0 0 0 0 0
37701 - 0 0 0 0 0 0 0 0 0 0 0 0
37702 - 0 0 0 0 0 0 0 0 0 0 0 0
37703 - 0 0 0 0 0 0 0 0 0 0 0 0
37704 - 0 0 0 0 0 0 6 6 6 18 18 18
37705 - 54 54 54 62 62 62 2 2 6 2 2 6
37706 - 2 2 6 30 30 30 46 46 46 70 70 70
37707 -250 250 250 253 253 253 253 253 253 253 253 253
37708 -253 253 253 253 253 253 253 253 253 253 253 253
37709 -253 253 253 253 253 253 231 231 231 246 246 246
37710 -253 253 253 253 253 253 253 253 253 253 253 253
37711 -253 253 253 253 253 253 253 253 253 253 253 253
37712 -253 253 253 253 253 253 253 253 253 253 253 253
37713 -253 253 253 253 253 253 253 253 253 253 253 253
37714 -253 253 253 253 253 253 226 226 226 10 10 10
37715 - 2 2 6 6 6 6 30 30 30 2 2 6
37716 - 2 2 6 2 2 6 2 2 6 2 2 6
37717 - 2 2 6 66 66 66 58 58 58 22 22 22
37718 - 6 6 6 0 0 0 0 0 0 0 0 0
37719 - 0 0 0 0 0 0 0 0 0 0 0 0
37720 - 0 0 0 0 0 0 0 0 0 0 0 0
37721 - 0 0 0 0 0 0 0 0 0 0 0 0
37722 - 0 0 0 0 0 0 0 0 0 0 0 0
37723 - 0 0 0 0 0 0 0 0 0 0 0 0
37724 - 0 0 0 0 0 0 6 6 6 22 22 22
37725 - 58 58 58 62 62 62 2 2 6 2 2 6
37726 - 2 2 6 2 2 6 30 30 30 78 78 78
37727 -250 250 250 253 253 253 253 253 253 253 253 253
37728 -253 253 253 253 253 253 253 253 253 253 253 253
37729 -253 253 253 253 253 253 231 231 231 246 246 246
37730 -253 253 253 253 253 253 253 253 253 253 253 253
37731 -253 253 253 253 253 253 253 253 253 253 253 253
37732 -253 253 253 253 253 253 253 253 253 253 253 253
37733 -253 253 253 253 253 253 253 253 253 253 253 253
37734 -253 253 253 253 253 253 206 206 206 2 2 6
37735 - 22 22 22 34 34 34 18 14 6 22 22 22
37736 - 26 26 26 18 18 18 6 6 6 2 2 6
37737 - 2 2 6 82 82 82 54 54 54 18 18 18
37738 - 6 6 6 0 0 0 0 0 0 0 0 0
37739 - 0 0 0 0 0 0 0 0 0 0 0 0
37740 - 0 0 0 0 0 0 0 0 0 0 0 0
37741 - 0 0 0 0 0 0 0 0 0 0 0 0
37742 - 0 0 0 0 0 0 0 0 0 0 0 0
37743 - 0 0 0 0 0 0 0 0 0 0 0 0
37744 - 0 0 0 0 0 0 6 6 6 26 26 26
37745 - 62 62 62 106 106 106 74 54 14 185 133 11
37746 -210 162 10 121 92 8 6 6 6 62 62 62
37747 -238 238 238 253 253 253 253 253 253 253 253 253
37748 -253 253 253 253 253 253 253 253 253 253 253 253
37749 -253 253 253 253 253 253 231 231 231 246 246 246
37750 -253 253 253 253 253 253 253 253 253 253 253 253
37751 -253 253 253 253 253 253 253 253 253 253 253 253
37752 -253 253 253 253 253 253 253 253 253 253 253 253
37753 -253 253 253 253 253 253 253 253 253 253 253 253
37754 -253 253 253 253 253 253 158 158 158 18 18 18
37755 - 14 14 14 2 2 6 2 2 6 2 2 6
37756 - 6 6 6 18 18 18 66 66 66 38 38 38
37757 - 6 6 6 94 94 94 50 50 50 18 18 18
37758 - 6 6 6 0 0 0 0 0 0 0 0 0
37759 - 0 0 0 0 0 0 0 0 0 0 0 0
37760 - 0 0 0 0 0 0 0 0 0 0 0 0
37761 - 0 0 0 0 0 0 0 0 0 0 0 0
37762 - 0 0 0 0 0 0 0 0 0 0 0 0
37763 - 0 0 0 0 0 0 0 0 0 6 6 6
37764 - 10 10 10 10 10 10 18 18 18 38 38 38
37765 - 78 78 78 142 134 106 216 158 10 242 186 14
37766 -246 190 14 246 190 14 156 118 10 10 10 10
37767 - 90 90 90 238 238 238 253 253 253 253 253 253
37768 -253 253 253 253 253 253 253 253 253 253 253 253
37769 -253 253 253 253 253 253 231 231 231 250 250 250
37770 -253 253 253 253 253 253 253 253 253 253 253 253
37771 -253 253 253 253 253 253 253 253 253 253 253 253
37772 -253 253 253 253 253 253 253 253 253 253 253 253
37773 -253 253 253 253 253 253 253 253 253 246 230 190
37774 -238 204 91 238 204 91 181 142 44 37 26 9
37775 - 2 2 6 2 2 6 2 2 6 2 2 6
37776 - 2 2 6 2 2 6 38 38 38 46 46 46
37777 - 26 26 26 106 106 106 54 54 54 18 18 18
37778 - 6 6 6 0 0 0 0 0 0 0 0 0
37779 - 0 0 0 0 0 0 0 0 0 0 0 0
37780 - 0 0 0 0 0 0 0 0 0 0 0 0
37781 - 0 0 0 0 0 0 0 0 0 0 0 0
37782 - 0 0 0 0 0 0 0 0 0 0 0 0
37783 - 0 0 0 6 6 6 14 14 14 22 22 22
37784 - 30 30 30 38 38 38 50 50 50 70 70 70
37785 -106 106 106 190 142 34 226 170 11 242 186 14
37786 -246 190 14 246 190 14 246 190 14 154 114 10
37787 - 6 6 6 74 74 74 226 226 226 253 253 253
37788 -253 253 253 253 253 253 253 253 253 253 253 253
37789 -253 253 253 253 253 253 231 231 231 250 250 250
37790 -253 253 253 253 253 253 253 253 253 253 253 253
37791 -253 253 253 253 253 253 253 253 253 253 253 253
37792 -253 253 253 253 253 253 253 253 253 253 253 253
37793 -253 253 253 253 253 253 253 253 253 228 184 62
37794 -241 196 14 241 208 19 232 195 16 38 30 10
37795 - 2 2 6 2 2 6 2 2 6 2 2 6
37796 - 2 2 6 6 6 6 30 30 30 26 26 26
37797 -203 166 17 154 142 90 66 66 66 26 26 26
37798 - 6 6 6 0 0 0 0 0 0 0 0 0
37799 - 0 0 0 0 0 0 0 0 0 0 0 0
37800 - 0 0 0 0 0 0 0 0 0 0 0 0
37801 - 0 0 0 0 0 0 0 0 0 0 0 0
37802 - 0 0 0 0 0 0 0 0 0 0 0 0
37803 - 6 6 6 18 18 18 38 38 38 58 58 58
37804 - 78 78 78 86 86 86 101 101 101 123 123 123
37805 -175 146 61 210 150 10 234 174 13 246 186 14
37806 -246 190 14 246 190 14 246 190 14 238 190 10
37807 -102 78 10 2 2 6 46 46 46 198 198 198
37808 -253 253 253 253 253 253 253 253 253 253 253 253
37809 -253 253 253 253 253 253 234 234 234 242 242 242
37810 -253 253 253 253 253 253 253 253 253 253 253 253
37811 -253 253 253 253 253 253 253 253 253 253 253 253
37812 -253 253 253 253 253 253 253 253 253 253 253 253
37813 -253 253 253 253 253 253 253 253 253 224 178 62
37814 -242 186 14 241 196 14 210 166 10 22 18 6
37815 - 2 2 6 2 2 6 2 2 6 2 2 6
37816 - 2 2 6 2 2 6 6 6 6 121 92 8
37817 -238 202 15 232 195 16 82 82 82 34 34 34
37818 - 10 10 10 0 0 0 0 0 0 0 0 0
37819 - 0 0 0 0 0 0 0 0 0 0 0 0
37820 - 0 0 0 0 0 0 0 0 0 0 0 0
37821 - 0 0 0 0 0 0 0 0 0 0 0 0
37822 - 0 0 0 0 0 0 0 0 0 0 0 0
37823 - 14 14 14 38 38 38 70 70 70 154 122 46
37824 -190 142 34 200 144 11 197 138 11 197 138 11
37825 -213 154 11 226 170 11 242 186 14 246 190 14
37826 -246 190 14 246 190 14 246 190 14 246 190 14
37827 -225 175 15 46 32 6 2 2 6 22 22 22
37828 -158 158 158 250 250 250 253 253 253 253 253 253
37829 -253 253 253 253 253 253 253 253 253 253 253 253
37830 -253 253 253 253 253 253 253 253 253 253 253 253
37831 -253 253 253 253 253 253 253 253 253 253 253 253
37832 -253 253 253 253 253 253 253 253 253 253 253 253
37833 -253 253 253 250 250 250 242 242 242 224 178 62
37834 -239 182 13 236 186 11 213 154 11 46 32 6
37835 - 2 2 6 2 2 6 2 2 6 2 2 6
37836 - 2 2 6 2 2 6 61 42 6 225 175 15
37837 -238 190 10 236 186 11 112 100 78 42 42 42
37838 - 14 14 14 0 0 0 0 0 0 0 0 0
37839 - 0 0 0 0 0 0 0 0 0 0 0 0
37840 - 0 0 0 0 0 0 0 0 0 0 0 0
37841 - 0 0 0 0 0 0 0 0 0 0 0 0
37842 - 0 0 0 0 0 0 0 0 0 6 6 6
37843 - 22 22 22 54 54 54 154 122 46 213 154 11
37844 -226 170 11 230 174 11 226 170 11 226 170 11
37845 -236 178 12 242 186 14 246 190 14 246 190 14
37846 -246 190 14 246 190 14 246 190 14 246 190 14
37847 -241 196 14 184 144 12 10 10 10 2 2 6
37848 - 6 6 6 116 116 116 242 242 242 253 253 253
37849 -253 253 253 253 253 253 253 253 253 253 253 253
37850 -253 253 253 253 253 253 253 253 253 253 253 253
37851 -253 253 253 253 253 253 253 253 253 253 253 253
37852 -253 253 253 253 253 253 253 253 253 253 253 253
37853 -253 253 253 231 231 231 198 198 198 214 170 54
37854 -236 178 12 236 178 12 210 150 10 137 92 6
37855 - 18 14 6 2 2 6 2 2 6 2 2 6
37856 - 6 6 6 70 47 6 200 144 11 236 178 12
37857 -239 182 13 239 182 13 124 112 88 58 58 58
37858 - 22 22 22 6 6 6 0 0 0 0 0 0
37859 - 0 0 0 0 0 0 0 0 0 0 0 0
37860 - 0 0 0 0 0 0 0 0 0 0 0 0
37861 - 0 0 0 0 0 0 0 0 0 0 0 0
37862 - 0 0 0 0 0 0 0 0 0 10 10 10
37863 - 30 30 30 70 70 70 180 133 36 226 170 11
37864 -239 182 13 242 186 14 242 186 14 246 186 14
37865 -246 190 14 246 190 14 246 190 14 246 190 14
37866 -246 190 14 246 190 14 246 190 14 246 190 14
37867 -246 190 14 232 195 16 98 70 6 2 2 6
37868 - 2 2 6 2 2 6 66 66 66 221 221 221
37869 -253 253 253 253 253 253 253 253 253 253 253 253
37870 -253 253 253 253 253 253 253 253 253 253 253 253
37871 -253 253 253 253 253 253 253 253 253 253 253 253
37872 -253 253 253 253 253 253 253 253 253 253 253 253
37873 -253 253 253 206 206 206 198 198 198 214 166 58
37874 -230 174 11 230 174 11 216 158 10 192 133 9
37875 -163 110 8 116 81 8 102 78 10 116 81 8
37876 -167 114 7 197 138 11 226 170 11 239 182 13
37877 -242 186 14 242 186 14 162 146 94 78 78 78
37878 - 34 34 34 14 14 14 6 6 6 0 0 0
37879 - 0 0 0 0 0 0 0 0 0 0 0 0
37880 - 0 0 0 0 0 0 0 0 0 0 0 0
37881 - 0 0 0 0 0 0 0 0 0 0 0 0
37882 - 0 0 0 0 0 0 0 0 0 6 6 6
37883 - 30 30 30 78 78 78 190 142 34 226 170 11
37884 -239 182 13 246 190 14 246 190 14 246 190 14
37885 -246 190 14 246 190 14 246 190 14 246 190 14
37886 -246 190 14 246 190 14 246 190 14 246 190 14
37887 -246 190 14 241 196 14 203 166 17 22 18 6
37888 - 2 2 6 2 2 6 2 2 6 38 38 38
37889 -218 218 218 253 253 253 253 253 253 253 253 253
37890 -253 253 253 253 253 253 253 253 253 253 253 253
37891 -253 253 253 253 253 253 253 253 253 253 253 253
37892 -253 253 253 253 253 253 253 253 253 253 253 253
37893 -250 250 250 206 206 206 198 198 198 202 162 69
37894 -226 170 11 236 178 12 224 166 10 210 150 10
37895 -200 144 11 197 138 11 192 133 9 197 138 11
37896 -210 150 10 226 170 11 242 186 14 246 190 14
37897 -246 190 14 246 186 14 225 175 15 124 112 88
37898 - 62 62 62 30 30 30 14 14 14 6 6 6
37899 - 0 0 0 0 0 0 0 0 0 0 0 0
37900 - 0 0 0 0 0 0 0 0 0 0 0 0
37901 - 0 0 0 0 0 0 0 0 0 0 0 0
37902 - 0 0 0 0 0 0 0 0 0 10 10 10
37903 - 30 30 30 78 78 78 174 135 50 224 166 10
37904 -239 182 13 246 190 14 246 190 14 246 190 14
37905 -246 190 14 246 190 14 246 190 14 246 190 14
37906 -246 190 14 246 190 14 246 190 14 246 190 14
37907 -246 190 14 246 190 14 241 196 14 139 102 15
37908 - 2 2 6 2 2 6 2 2 6 2 2 6
37909 - 78 78 78 250 250 250 253 253 253 253 253 253
37910 -253 253 253 253 253 253 253 253 253 253 253 253
37911 -253 253 253 253 253 253 253 253 253 253 253 253
37912 -253 253 253 253 253 253 253 253 253 253 253 253
37913 -250 250 250 214 214 214 198 198 198 190 150 46
37914 -219 162 10 236 178 12 234 174 13 224 166 10
37915 -216 158 10 213 154 11 213 154 11 216 158 10
37916 -226 170 11 239 182 13 246 190 14 246 190 14
37917 -246 190 14 246 190 14 242 186 14 206 162 42
37918 -101 101 101 58 58 58 30 30 30 14 14 14
37919 - 6 6 6 0 0 0 0 0 0 0 0 0
37920 - 0 0 0 0 0 0 0 0 0 0 0 0
37921 - 0 0 0 0 0 0 0 0 0 0 0 0
37922 - 0 0 0 0 0 0 0 0 0 10 10 10
37923 - 30 30 30 74 74 74 174 135 50 216 158 10
37924 -236 178 12 246 190 14 246 190 14 246 190 14
37925 -246 190 14 246 190 14 246 190 14 246 190 14
37926 -246 190 14 246 190 14 246 190 14 246 190 14
37927 -246 190 14 246 190 14 241 196 14 226 184 13
37928 - 61 42 6 2 2 6 2 2 6 2 2 6
37929 - 22 22 22 238 238 238 253 253 253 253 253 253
37930 -253 253 253 253 253 253 253 253 253 253 253 253
37931 -253 253 253 253 253 253 253 253 253 253 253 253
37932 -253 253 253 253 253 253 253 253 253 253 253 253
37933 -253 253 253 226 226 226 187 187 187 180 133 36
37934 -216 158 10 236 178 12 239 182 13 236 178 12
37935 -230 174 11 226 170 11 226 170 11 230 174 11
37936 -236 178 12 242 186 14 246 190 14 246 190 14
37937 -246 190 14 246 190 14 246 186 14 239 182 13
37938 -206 162 42 106 106 106 66 66 66 34 34 34
37939 - 14 14 14 6 6 6 0 0 0 0 0 0
37940 - 0 0 0 0 0 0 0 0 0 0 0 0
37941 - 0 0 0 0 0 0 0 0 0 0 0 0
37942 - 0 0 0 0 0 0 0 0 0 6 6 6
37943 - 26 26 26 70 70 70 163 133 67 213 154 11
37944 -236 178 12 246 190 14 246 190 14 246 190 14
37945 -246 190 14 246 190 14 246 190 14 246 190 14
37946 -246 190 14 246 190 14 246 190 14 246 190 14
37947 -246 190 14 246 190 14 246 190 14 241 196 14
37948 -190 146 13 18 14 6 2 2 6 2 2 6
37949 - 46 46 46 246 246 246 253 253 253 253 253 253
37950 -253 253 253 253 253 253 253 253 253 253 253 253
37951 -253 253 253 253 253 253 253 253 253 253 253 253
37952 -253 253 253 253 253 253 253 253 253 253 253 253
37953 -253 253 253 221 221 221 86 86 86 156 107 11
37954 -216 158 10 236 178 12 242 186 14 246 186 14
37955 -242 186 14 239 182 13 239 182 13 242 186 14
37956 -242 186 14 246 186 14 246 190 14 246 190 14
37957 -246 190 14 246 190 14 246 190 14 246 190 14
37958 -242 186 14 225 175 15 142 122 72 66 66 66
37959 - 30 30 30 10 10 10 0 0 0 0 0 0
37960 - 0 0 0 0 0 0 0 0 0 0 0 0
37961 - 0 0 0 0 0 0 0 0 0 0 0 0
37962 - 0 0 0 0 0 0 0 0 0 6 6 6
37963 - 26 26 26 70 70 70 163 133 67 210 150 10
37964 -236 178 12 246 190 14 246 190 14 246 190 14
37965 -246 190 14 246 190 14 246 190 14 246 190 14
37966 -246 190 14 246 190 14 246 190 14 246 190 14
37967 -246 190 14 246 190 14 246 190 14 246 190 14
37968 -232 195 16 121 92 8 34 34 34 106 106 106
37969 -221 221 221 253 253 253 253 253 253 253 253 253
37970 -253 253 253 253 253 253 253 253 253 253 253 253
37971 -253 253 253 253 253 253 253 253 253 253 253 253
37972 -253 253 253 253 253 253 253 253 253 253 253 253
37973 -242 242 242 82 82 82 18 14 6 163 110 8
37974 -216 158 10 236 178 12 242 186 14 246 190 14
37975 -246 190 14 246 190 14 246 190 14 246 190 14
37976 -246 190 14 246 190 14 246 190 14 246 190 14
37977 -246 190 14 246 190 14 246 190 14 246 190 14
37978 -246 190 14 246 190 14 242 186 14 163 133 67
37979 - 46 46 46 18 18 18 6 6 6 0 0 0
37980 - 0 0 0 0 0 0 0 0 0 0 0 0
37981 - 0 0 0 0 0 0 0 0 0 0 0 0
37982 - 0 0 0 0 0 0 0 0 0 10 10 10
37983 - 30 30 30 78 78 78 163 133 67 210 150 10
37984 -236 178 12 246 186 14 246 190 14 246 190 14
37985 -246 190 14 246 190 14 246 190 14 246 190 14
37986 -246 190 14 246 190 14 246 190 14 246 190 14
37987 -246 190 14 246 190 14 246 190 14 246 190 14
37988 -241 196 14 215 174 15 190 178 144 253 253 253
37989 -253 253 253 253 253 253 253 253 253 253 253 253
37990 -253 253 253 253 253 253 253 253 253 253 253 253
37991 -253 253 253 253 253 253 253 253 253 253 253 253
37992 -253 253 253 253 253 253 253 253 253 218 218 218
37993 - 58 58 58 2 2 6 22 18 6 167 114 7
37994 -216 158 10 236 178 12 246 186 14 246 190 14
37995 -246 190 14 246 190 14 246 190 14 246 190 14
37996 -246 190 14 246 190 14 246 190 14 246 190 14
37997 -246 190 14 246 190 14 246 190 14 246 190 14
37998 -246 190 14 246 186 14 242 186 14 190 150 46
37999 - 54 54 54 22 22 22 6 6 6 0 0 0
38000 - 0 0 0 0 0 0 0 0 0 0 0 0
38001 - 0 0 0 0 0 0 0 0 0 0 0 0
38002 - 0 0 0 0 0 0 0 0 0 14 14 14
38003 - 38 38 38 86 86 86 180 133 36 213 154 11
38004 -236 178 12 246 186 14 246 190 14 246 190 14
38005 -246 190 14 246 190 14 246 190 14 246 190 14
38006 -246 190 14 246 190 14 246 190 14 246 190 14
38007 -246 190 14 246 190 14 246 190 14 246 190 14
38008 -246 190 14 232 195 16 190 146 13 214 214 214
38009 -253 253 253 253 253 253 253 253 253 253 253 253
38010 -253 253 253 253 253 253 253 253 253 253 253 253
38011 -253 253 253 253 253 253 253 253 253 253 253 253
38012 -253 253 253 250 250 250 170 170 170 26 26 26
38013 - 2 2 6 2 2 6 37 26 9 163 110 8
38014 -219 162 10 239 182 13 246 186 14 246 190 14
38015 -246 190 14 246 190 14 246 190 14 246 190 14
38016 -246 190 14 246 190 14 246 190 14 246 190 14
38017 -246 190 14 246 190 14 246 190 14 246 190 14
38018 -246 186 14 236 178 12 224 166 10 142 122 72
38019 - 46 46 46 18 18 18 6 6 6 0 0 0
38020 - 0 0 0 0 0 0 0 0 0 0 0 0
38021 - 0 0 0 0 0 0 0 0 0 0 0 0
38022 - 0 0 0 0 0 0 6 6 6 18 18 18
38023 - 50 50 50 109 106 95 192 133 9 224 166 10
38024 -242 186 14 246 190 14 246 190 14 246 190 14
38025 -246 190 14 246 190 14 246 190 14 246 190 14
38026 -246 190 14 246 190 14 246 190 14 246 190 14
38027 -246 190 14 246 190 14 246 190 14 246 190 14
38028 -242 186 14 226 184 13 210 162 10 142 110 46
38029 -226 226 226 253 253 253 253 253 253 253 253 253
38030 -253 253 253 253 253 253 253 253 253 253 253 253
38031 -253 253 253 253 253 253 253 253 253 253 253 253
38032 -198 198 198 66 66 66 2 2 6 2 2 6
38033 - 2 2 6 2 2 6 50 34 6 156 107 11
38034 -219 162 10 239 182 13 246 186 14 246 190 14
38035 -246 190 14 246 190 14 246 190 14 246 190 14
38036 -246 190 14 246 190 14 246 190 14 246 190 14
38037 -246 190 14 246 190 14 246 190 14 242 186 14
38038 -234 174 13 213 154 11 154 122 46 66 66 66
38039 - 30 30 30 10 10 10 0 0 0 0 0 0
38040 - 0 0 0 0 0 0 0 0 0 0 0 0
38041 - 0 0 0 0 0 0 0 0 0 0 0 0
38042 - 0 0 0 0 0 0 6 6 6 22 22 22
38043 - 58 58 58 154 121 60 206 145 10 234 174 13
38044 -242 186 14 246 186 14 246 190 14 246 190 14
38045 -246 190 14 246 190 14 246 190 14 246 190 14
38046 -246 190 14 246 190 14 246 190 14 246 190 14
38047 -246 190 14 246 190 14 246 190 14 246 190 14
38048 -246 186 14 236 178 12 210 162 10 163 110 8
38049 - 61 42 6 138 138 138 218 218 218 250 250 250
38050 -253 253 253 253 253 253 253 253 253 250 250 250
38051 -242 242 242 210 210 210 144 144 144 66 66 66
38052 - 6 6 6 2 2 6 2 2 6 2 2 6
38053 - 2 2 6 2 2 6 61 42 6 163 110 8
38054 -216 158 10 236 178 12 246 190 14 246 190 14
38055 -246 190 14 246 190 14 246 190 14 246 190 14
38056 -246 190 14 246 190 14 246 190 14 246 190 14
38057 -246 190 14 239 182 13 230 174 11 216 158 10
38058 -190 142 34 124 112 88 70 70 70 38 38 38
38059 - 18 18 18 6 6 6 0 0 0 0 0 0
38060 - 0 0 0 0 0 0 0 0 0 0 0 0
38061 - 0 0 0 0 0 0 0 0 0 0 0 0
38062 - 0 0 0 0 0 0 6 6 6 22 22 22
38063 - 62 62 62 168 124 44 206 145 10 224 166 10
38064 -236 178 12 239 182 13 242 186 14 242 186 14
38065 -246 186 14 246 190 14 246 190 14 246 190 14
38066 -246 190 14 246 190 14 246 190 14 246 190 14
38067 -246 190 14 246 190 14 246 190 14 246 190 14
38068 -246 190 14 236 178 12 216 158 10 175 118 6
38069 - 80 54 7 2 2 6 6 6 6 30 30 30
38070 - 54 54 54 62 62 62 50 50 50 38 38 38
38071 - 14 14 14 2 2 6 2 2 6 2 2 6
38072 - 2 2 6 2 2 6 2 2 6 2 2 6
38073 - 2 2 6 6 6 6 80 54 7 167 114 7
38074 -213 154 11 236 178 12 246 190 14 246 190 14
38075 -246 190 14 246 190 14 246 190 14 246 190 14
38076 -246 190 14 242 186 14 239 182 13 239 182 13
38077 -230 174 11 210 150 10 174 135 50 124 112 88
38078 - 82 82 82 54 54 54 34 34 34 18 18 18
38079 - 6 6 6 0 0 0 0 0 0 0 0 0
38080 - 0 0 0 0 0 0 0 0 0 0 0 0
38081 - 0 0 0 0 0 0 0 0 0 0 0 0
38082 - 0 0 0 0 0 0 6 6 6 18 18 18
38083 - 50 50 50 158 118 36 192 133 9 200 144 11
38084 -216 158 10 219 162 10 224 166 10 226 170 11
38085 -230 174 11 236 178 12 239 182 13 239 182 13
38086 -242 186 14 246 186 14 246 190 14 246 190 14
38087 -246 190 14 246 190 14 246 190 14 246 190 14
38088 -246 186 14 230 174 11 210 150 10 163 110 8
38089 -104 69 6 10 10 10 2 2 6 2 2 6
38090 - 2 2 6 2 2 6 2 2 6 2 2 6
38091 - 2 2 6 2 2 6 2 2 6 2 2 6
38092 - 2 2 6 2 2 6 2 2 6 2 2 6
38093 - 2 2 6 6 6 6 91 60 6 167 114 7
38094 -206 145 10 230 174 11 242 186 14 246 190 14
38095 -246 190 14 246 190 14 246 186 14 242 186 14
38096 -239 182 13 230 174 11 224 166 10 213 154 11
38097 -180 133 36 124 112 88 86 86 86 58 58 58
38098 - 38 38 38 22 22 22 10 10 10 6 6 6
38099 - 0 0 0 0 0 0 0 0 0 0 0 0
38100 - 0 0 0 0 0 0 0 0 0 0 0 0
38101 - 0 0 0 0 0 0 0 0 0 0 0 0
38102 - 0 0 0 0 0 0 0 0 0 14 14 14
38103 - 34 34 34 70 70 70 138 110 50 158 118 36
38104 -167 114 7 180 123 7 192 133 9 197 138 11
38105 -200 144 11 206 145 10 213 154 11 219 162 10
38106 -224 166 10 230 174 11 239 182 13 242 186 14
38107 -246 186 14 246 186 14 246 186 14 246 186 14
38108 -239 182 13 216 158 10 185 133 11 152 99 6
38109 -104 69 6 18 14 6 2 2 6 2 2 6
38110 - 2 2 6 2 2 6 2 2 6 2 2 6
38111 - 2 2 6 2 2 6 2 2 6 2 2 6
38112 - 2 2 6 2 2 6 2 2 6 2 2 6
38113 - 2 2 6 6 6 6 80 54 7 152 99 6
38114 -192 133 9 219 162 10 236 178 12 239 182 13
38115 -246 186 14 242 186 14 239 182 13 236 178 12
38116 -224 166 10 206 145 10 192 133 9 154 121 60
38117 - 94 94 94 62 62 62 42 42 42 22 22 22
38118 - 14 14 14 6 6 6 0 0 0 0 0 0
38119 - 0 0 0 0 0 0 0 0 0 0 0 0
38120 - 0 0 0 0 0 0 0 0 0 0 0 0
38121 - 0 0 0 0 0 0 0 0 0 0 0 0
38122 - 0 0 0 0 0 0 0 0 0 6 6 6
38123 - 18 18 18 34 34 34 58 58 58 78 78 78
38124 -101 98 89 124 112 88 142 110 46 156 107 11
38125 -163 110 8 167 114 7 175 118 6 180 123 7
38126 -185 133 11 197 138 11 210 150 10 219 162 10
38127 -226 170 11 236 178 12 236 178 12 234 174 13
38128 -219 162 10 197 138 11 163 110 8 130 83 6
38129 - 91 60 6 10 10 10 2 2 6 2 2 6
38130 - 18 18 18 38 38 38 38 38 38 38 38 38
38131 - 38 38 38 38 38 38 38 38 38 38 38 38
38132 - 38 38 38 38 38 38 26 26 26 2 2 6
38133 - 2 2 6 6 6 6 70 47 6 137 92 6
38134 -175 118 6 200 144 11 219 162 10 230 174 11
38135 -234 174 13 230 174 11 219 162 10 210 150 10
38136 -192 133 9 163 110 8 124 112 88 82 82 82
38137 - 50 50 50 30 30 30 14 14 14 6 6 6
38138 - 0 0 0 0 0 0 0 0 0 0 0 0
38139 - 0 0 0 0 0 0 0 0 0 0 0 0
38140 - 0 0 0 0 0 0 0 0 0 0 0 0
38141 - 0 0 0 0 0 0 0 0 0 0 0 0
38142 - 0 0 0 0 0 0 0 0 0 0 0 0
38143 - 6 6 6 14 14 14 22 22 22 34 34 34
38144 - 42 42 42 58 58 58 74 74 74 86 86 86
38145 -101 98 89 122 102 70 130 98 46 121 87 25
38146 -137 92 6 152 99 6 163 110 8 180 123 7
38147 -185 133 11 197 138 11 206 145 10 200 144 11
38148 -180 123 7 156 107 11 130 83 6 104 69 6
38149 - 50 34 6 54 54 54 110 110 110 101 98 89
38150 - 86 86 86 82 82 82 78 78 78 78 78 78
38151 - 78 78 78 78 78 78 78 78 78 78 78 78
38152 - 78 78 78 82 82 82 86 86 86 94 94 94
38153 -106 106 106 101 101 101 86 66 34 124 80 6
38154 -156 107 11 180 123 7 192 133 9 200 144 11
38155 -206 145 10 200 144 11 192 133 9 175 118 6
38156 -139 102 15 109 106 95 70 70 70 42 42 42
38157 - 22 22 22 10 10 10 0 0 0 0 0 0
38158 - 0 0 0 0 0 0 0 0 0 0 0 0
38159 - 0 0 0 0 0 0 0 0 0 0 0 0
38160 - 0 0 0 0 0 0 0 0 0 0 0 0
38161 - 0 0 0 0 0 0 0 0 0 0 0 0
38162 - 0 0 0 0 0 0 0 0 0 0 0 0
38163 - 0 0 0 0 0 0 6 6 6 10 10 10
38164 - 14 14 14 22 22 22 30 30 30 38 38 38
38165 - 50 50 50 62 62 62 74 74 74 90 90 90
38166 -101 98 89 112 100 78 121 87 25 124 80 6
38167 -137 92 6 152 99 6 152 99 6 152 99 6
38168 -138 86 6 124 80 6 98 70 6 86 66 30
38169 -101 98 89 82 82 82 58 58 58 46 46 46
38170 - 38 38 38 34 34 34 34 34 34 34 34 34
38171 - 34 34 34 34 34 34 34 34 34 34 34 34
38172 - 34 34 34 34 34 34 38 38 38 42 42 42
38173 - 54 54 54 82 82 82 94 86 76 91 60 6
38174 -134 86 6 156 107 11 167 114 7 175 118 6
38175 -175 118 6 167 114 7 152 99 6 121 87 25
38176 -101 98 89 62 62 62 34 34 34 18 18 18
38177 - 6 6 6 0 0 0 0 0 0 0 0 0
38178 - 0 0 0 0 0 0 0 0 0 0 0 0
38179 - 0 0 0 0 0 0 0 0 0 0 0 0
38180 - 0 0 0 0 0 0 0 0 0 0 0 0
38181 - 0 0 0 0 0 0 0 0 0 0 0 0
38182 - 0 0 0 0 0 0 0 0 0 0 0 0
38183 - 0 0 0 0 0 0 0 0 0 0 0 0
38184 - 0 0 0 6 6 6 6 6 6 10 10 10
38185 - 18 18 18 22 22 22 30 30 30 42 42 42
38186 - 50 50 50 66 66 66 86 86 86 101 98 89
38187 -106 86 58 98 70 6 104 69 6 104 69 6
38188 -104 69 6 91 60 6 82 62 34 90 90 90
38189 - 62 62 62 38 38 38 22 22 22 14 14 14
38190 - 10 10 10 10 10 10 10 10 10 10 10 10
38191 - 10 10 10 10 10 10 6 6 6 10 10 10
38192 - 10 10 10 10 10 10 10 10 10 14 14 14
38193 - 22 22 22 42 42 42 70 70 70 89 81 66
38194 - 80 54 7 104 69 6 124 80 6 137 92 6
38195 -134 86 6 116 81 8 100 82 52 86 86 86
38196 - 58 58 58 30 30 30 14 14 14 6 6 6
38197 - 0 0 0 0 0 0 0 0 0 0 0 0
38198 - 0 0 0 0 0 0 0 0 0 0 0 0
38199 - 0 0 0 0 0 0 0 0 0 0 0 0
38200 - 0 0 0 0 0 0 0 0 0 0 0 0
38201 - 0 0 0 0 0 0 0 0 0 0 0 0
38202 - 0 0 0 0 0 0 0 0 0 0 0 0
38203 - 0 0 0 0 0 0 0 0 0 0 0 0
38204 - 0 0 0 0 0 0 0 0 0 0 0 0
38205 - 0 0 0 6 6 6 10 10 10 14 14 14
38206 - 18 18 18 26 26 26 38 38 38 54 54 54
38207 - 70 70 70 86 86 86 94 86 76 89 81 66
38208 - 89 81 66 86 86 86 74 74 74 50 50 50
38209 - 30 30 30 14 14 14 6 6 6 0 0 0
38210 - 0 0 0 0 0 0 0 0 0 0 0 0
38211 - 0 0 0 0 0 0 0 0 0 0 0 0
38212 - 0 0 0 0 0 0 0 0 0 0 0 0
38213 - 6 6 6 18 18 18 34 34 34 58 58 58
38214 - 82 82 82 89 81 66 89 81 66 89 81 66
38215 - 94 86 66 94 86 76 74 74 74 50 50 50
38216 - 26 26 26 14 14 14 6 6 6 0 0 0
38217 - 0 0 0 0 0 0 0 0 0 0 0 0
38218 - 0 0 0 0 0 0 0 0 0 0 0 0
38219 - 0 0 0 0 0 0 0 0 0 0 0 0
38220 - 0 0 0 0 0 0 0 0 0 0 0 0
38221 - 0 0 0 0 0 0 0 0 0 0 0 0
38222 - 0 0 0 0 0 0 0 0 0 0 0 0
38223 - 0 0 0 0 0 0 0 0 0 0 0 0
38224 - 0 0 0 0 0 0 0 0 0 0 0 0
38225 - 0 0 0 0 0 0 0 0 0 0 0 0
38226 - 6 6 6 6 6 6 14 14 14 18 18 18
38227 - 30 30 30 38 38 38 46 46 46 54 54 54
38228 - 50 50 50 42 42 42 30 30 30 18 18 18
38229 - 10 10 10 0 0 0 0 0 0 0 0 0
38230 - 0 0 0 0 0 0 0 0 0 0 0 0
38231 - 0 0 0 0 0 0 0 0 0 0 0 0
38232 - 0 0 0 0 0 0 0 0 0 0 0 0
38233 - 0 0 0 6 6 6 14 14 14 26 26 26
38234 - 38 38 38 50 50 50 58 58 58 58 58 58
38235 - 54 54 54 42 42 42 30 30 30 18 18 18
38236 - 10 10 10 0 0 0 0 0 0 0 0 0
38237 - 0 0 0 0 0 0 0 0 0 0 0 0
38238 - 0 0 0 0 0 0 0 0 0 0 0 0
38239 - 0 0 0 0 0 0 0 0 0 0 0 0
38240 - 0 0 0 0 0 0 0 0 0 0 0 0
38241 - 0 0 0 0 0 0 0 0 0 0 0 0
38242 - 0 0 0 0 0 0 0 0 0 0 0 0
38243 - 0 0 0 0 0 0 0 0 0 0 0 0
38244 - 0 0 0 0 0 0 0 0 0 0 0 0
38245 - 0 0 0 0 0 0 0 0 0 0 0 0
38246 - 0 0 0 0 0 0 0 0 0 6 6 6
38247 - 6 6 6 10 10 10 14 14 14 18 18 18
38248 - 18 18 18 14 14 14 10 10 10 6 6 6
38249 - 0 0 0 0 0 0 0 0 0 0 0 0
38250 - 0 0 0 0 0 0 0 0 0 0 0 0
38251 - 0 0 0 0 0 0 0 0 0 0 0 0
38252 - 0 0 0 0 0 0 0 0 0 0 0 0
38253 - 0 0 0 0 0 0 0 0 0 6 6 6
38254 - 14 14 14 18 18 18 22 22 22 22 22 22
38255 - 18 18 18 14 14 14 10 10 10 6 6 6
38256 - 0 0 0 0 0 0 0 0 0 0 0 0
38257 - 0 0 0 0 0 0 0 0 0 0 0 0
38258 - 0 0 0 0 0 0 0 0 0 0 0 0
38259 - 0 0 0 0 0 0 0 0 0 0 0 0
38260 - 0 0 0 0 0 0 0 0 0 0 0 0
38261 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38265 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38272 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38274 +4 4 4 4 4 4
38275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38279 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38286 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38288 +4 4 4 4 4 4
38289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38300 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38302 +4 4 4 4 4 4
38303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38315 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38316 +4 4 4 4 4 4
38317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38328 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38329 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38330 +4 4 4 4 4 4
38331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38335 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38341 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38342 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38343 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38344 +4 4 4 4 4 4
38345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38349 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38350 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38354 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38355 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38356 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38357 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38358 +4 4 4 4 4 4
38359 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38360 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38362 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38363 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38364 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38365 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38368 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38369 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38370 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38371 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38372 +4 4 4 4 4 4
38373 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38374 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38375 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38376 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38377 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38378 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38379 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38381 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38382 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38383 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38384 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38385 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38386 +4 4 4 4 4 4
38387 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38388 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38389 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38390 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38391 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38392 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38393 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38394 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38395 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38396 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38397 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38398 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38399 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38400 +4 4 4 4 4 4
38401 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38402 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38403 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38404 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38405 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38406 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38407 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38408 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38409 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38410 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38411 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38412 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38413 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38414 +4 4 4 4 4 4
38415 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38416 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38417 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38418 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38419 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38420 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38421 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38422 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38423 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38424 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38425 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38426 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38427 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38428 +4 4 4 4 4 4
38429 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38430 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38431 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38432 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38433 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38434 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38435 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38436 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38437 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38438 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38439 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38440 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38441 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38442 +4 4 4 4 4 4
38443 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38444 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38445 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38446 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38447 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38448 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38449 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38450 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38451 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38452 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38453 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38454 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38455 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38456 +4 4 4 4 4 4
38457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38458 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38459 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38460 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38461 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38462 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38463 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38464 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38465 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38466 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38467 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38468 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38469 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38470 +4 4 4 4 4 4
38471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38472 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38473 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38474 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38475 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38476 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38477 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38478 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38479 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38480 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38481 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38482 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38483 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38484 +4 4 4 4 4 4
38485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38486 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38487 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38488 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38489 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38490 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38491 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38492 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38493 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38494 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38495 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38496 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38497 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38498 +4 4 4 4 4 4
38499 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38500 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38501 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38502 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38503 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38504 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38505 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38506 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38507 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38508 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38509 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38510 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38511 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38512 +0 0 0 4 4 4
38513 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38514 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38515 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38516 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38517 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38518 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38519 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38520 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38521 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38522 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38523 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38524 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38525 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38526 +2 0 0 0 0 0
38527 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38528 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38529 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38530 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38531 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38532 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38533 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38534 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38535 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38536 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38537 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38538 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38539 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38540 +37 38 37 0 0 0
38541 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38542 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38543 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38544 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38545 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38546 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38547 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38548 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38549 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38550 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38551 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38552 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38553 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38554 +85 115 134 4 0 0
38555 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38556 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38557 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38558 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38559 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38560 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38561 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38562 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38563 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38564 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38565 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38566 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38567 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38568 +60 73 81 4 0 0
38569 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38570 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38571 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38572 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38573 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38574 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38575 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38576 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38577 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38578 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38579 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38580 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38581 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38582 +16 19 21 4 0 0
38583 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38584 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38585 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38586 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38587 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38588 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38589 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38590 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38591 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38592 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38593 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38594 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38595 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38596 +4 0 0 4 3 3
38597 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38598 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38599 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38601 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38602 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38603 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38604 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38605 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38606 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38607 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38608 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38609 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38610 +3 2 2 4 4 4
38611 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38612 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38613 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38614 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38615 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38616 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38617 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38618 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38619 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38620 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38621 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38622 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38623 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38624 +4 4 4 4 4 4
38625 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38626 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38627 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38628 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38629 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38630 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38631 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38632 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38633 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38634 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38635 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38636 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38637 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38638 +4 4 4 4 4 4
38639 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38640 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38641 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38642 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38643 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38644 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38645 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38646 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38647 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38648 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38649 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38650 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38651 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38652 +5 5 5 5 5 5
38653 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38654 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38655 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38656 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38657 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38658 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38659 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38660 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38661 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38662 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38663 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38664 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38665 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38666 +5 5 5 4 4 4
38667 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38668 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38669 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38670 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38671 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38672 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38673 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38674 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38675 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38676 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38677 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38678 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38679 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38680 +4 4 4 4 4 4
38681 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38682 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38683 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38684 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38685 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38686 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38687 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38688 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38689 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38690 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38691 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38692 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38693 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38694 +4 4 4 4 4 4
38695 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38696 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38697 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38698 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38699 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38700 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38701 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38702 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38703 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38704 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38705 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38706 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38707 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38708 +4 4 4 4 4 4
38709 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38710 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38711 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38712 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38713 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38714 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38715 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38716 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38717 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38718 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38719 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38720 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38721 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38722 +4 4 4 4 4 4
38723 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38724 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38725 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38726 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38727 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38728 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38729 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38730 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38731 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38732 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38733 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38734 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38735 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38736 +4 4 4 4 4 4
38737 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38738 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38739 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38740 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38741 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38742 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38743 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38744 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38745 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38746 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38747 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38748 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38749 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38750 +4 4 4 4 4 4
38751 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38752 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38753 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38754 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38755 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38756 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38757 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38758 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38759 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38760 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38761 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38762 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38763 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38764 +4 4 4 4 4 4
38765 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38766 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38767 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38768 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38769 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38770 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38771 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38772 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38773 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38774 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38775 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38776 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38777 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38778 +4 4 4 4 4 4
38779 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38780 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38781 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38782 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38783 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38784 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38785 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38786 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38787 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38788 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38789 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38790 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38791 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38792 +4 4 4 4 4 4
38793 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38794 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38795 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38796 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38797 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38798 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38799 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38800 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38801 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38802 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38803 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38804 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38805 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38806 +4 4 4 4 4 4
38807 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38808 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38809 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38810 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38811 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38812 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38813 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38814 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38815 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38816 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38817 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38818 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38819 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38820 +4 4 4 4 4 4
38821 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38822 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38823 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38824 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38825 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38826 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38827 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38828 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38829 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38830 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38831 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38832 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38833 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38834 +4 4 4 4 4 4
38835 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38836 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38837 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38838 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38839 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38840 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38841 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38842 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38843 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38844 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38845 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38846 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38847 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38848 +4 4 4 4 4 4
38849 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38850 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38851 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38852 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38853 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38854 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38855 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38856 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38857 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38858 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38859 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38860 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38861 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38862 +4 4 4 4 4 4
38863 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38864 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38865 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38866 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38867 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38868 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38869 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38870 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38871 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38872 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38873 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38874 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38876 +4 4 4 4 4 4
38877 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38878 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38879 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38880 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38881 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38882 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38883 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38884 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38885 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38886 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38887 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38888 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38890 +4 4 4 4 4 4
38891 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38892 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38893 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38894 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38895 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38896 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38897 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38898 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38899 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38900 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38901 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38904 +4 4 4 4 4 4
38905 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38906 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38907 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38908 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38909 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38910 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38911 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38912 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38913 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38914 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38915 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38918 +4 4 4 4 4 4
38919 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38920 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38921 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38922 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38923 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38924 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38925 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38926 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38927 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38928 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38929 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38932 +4 4 4 4 4 4
38933 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38934 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38935 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38936 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38937 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38938 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38939 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38940 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38941 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38942 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38943 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38946 +4 4 4 4 4 4
38947 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38948 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38949 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38950 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38951 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38952 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38953 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38954 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38955 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38956 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38957 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38958 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38960 +4 4 4 4 4 4
38961 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38962 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38963 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38964 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38965 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38966 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38967 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38968 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38969 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38970 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38971 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38972 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38974 +4 4 4 4 4 4
38975 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38976 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38977 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38978 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38979 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38980 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38981 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38982 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38983 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38984 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38985 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38986 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38987 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38988 +4 4 4 4 4 4
38989 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38990 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38991 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38992 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38993 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38994 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38995 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38996 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38997 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38998 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38999 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39002 +4 4 4 4 4 4
39003 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
39004 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
39005 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
39006 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
39007 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
39008 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
39009 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
39010 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
39011 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
39012 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39013 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39016 +4 4 4 4 4 4
39017 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
39018 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
39019 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
39020 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
39021 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
39022 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
39023 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
39024 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
39025 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
39026 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
39027 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39030 +4 4 4 4 4 4
39031 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
39032 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
39033 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
39034 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
39035 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
39036 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
39037 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
39038 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
39039 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
39040 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
39041 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39044 +4 4 4 4 4 4
39045 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
39046 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
39047 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
39048 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
39049 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
39050 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
39051 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39052 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
39053 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
39054 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
39055 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39058 +4 4 4 4 4 4
39059 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
39060 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
39061 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
39062 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
39063 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
39064 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
39065 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
39066 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
39067 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
39068 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
39069 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39072 +4 4 4 4 4 4
39073 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
39074 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
39075 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
39076 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
39077 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
39078 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
39079 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
39080 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
39081 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
39082 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
39083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39086 +4 4 4 4 4 4
39087 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39088 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
39089 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
39090 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
39091 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
39092 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
39093 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
39094 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
39095 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
39096 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
39097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39100 +4 4 4 4 4 4
39101 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
39102 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
39103 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
39104 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
39105 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
39106 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
39107 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
39108 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
39109 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
39110 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
39111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39114 +4 4 4 4 4 4
39115 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
39116 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
39117 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
39118 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
39119 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
39120 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
39121 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
39122 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
39123 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
39124 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39127 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39128 +4 4 4 4 4 4
39129 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
39130 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39131 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
39132 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
39133 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
39134 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
39135 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
39136 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
39137 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
39138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39139 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39140 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39141 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39142 +4 4 4 4 4 4
39143 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
39144 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
39145 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
39146 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
39147 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
39148 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
39149 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
39150 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
39151 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
39152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39153 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39154 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39155 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39156 +4 4 4 4 4 4
39157 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39158 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
39159 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
39160 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
39161 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
39162 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
39163 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
39164 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
39165 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39169 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39170 +4 4 4 4 4 4
39171 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
39172 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
39173 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39174 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
39175 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
39176 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
39177 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
39178 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
39179 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39180 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39181 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39182 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39183 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39184 +4 4 4 4 4 4
39185 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39186 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
39187 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
39188 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
39189 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
39190 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
39191 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
39192 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
39193 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39194 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39195 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39196 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39197 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39198 +4 4 4 4 4 4
39199 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39200 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
39201 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39202 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
39203 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
39204 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
39205 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
39206 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
39207 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39208 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39209 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39210 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39211 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39212 +4 4 4 4 4 4
39213 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39214 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
39215 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
39216 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
39217 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
39218 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
39219 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
39220 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
39221 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39222 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39223 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39224 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39225 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39226 +4 4 4 4 4 4
39227 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39228 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39229 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
39230 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39231 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
39232 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
39233 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
39234 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39235 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39236 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39237 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39238 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39239 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39240 +4 4 4 4 4 4
39241 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39242 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39243 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39244 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
39245 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
39246 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
39247 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
39248 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39250 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39251 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39254 +4 4 4 4 4 4
39255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39258 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39259 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
39260 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
39261 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
39262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39265 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39268 +4 4 4 4 4 4
39269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39272 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39273 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39274 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
39275 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
39276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39279 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39282 +4 4 4 4 4 4
39283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39286 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39287 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39288 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39289 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
39290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39296 +4 4 4 4 4 4
39297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39300 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
39301 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
39302 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
39303 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
39304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39310 +4 4 4 4 4 4
39311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39315 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
39316 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39317 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39324 +4 4 4 4 4 4
39325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39328 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39329 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39330 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39331 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39335 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39338 +4 4 4 4 4 4
39339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39341 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39342 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39343 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39344 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39345 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39349 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39352 +4 4 4 4 4 4
39353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39354 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39355 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39356 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39357 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39358 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39359 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39360 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39362 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39363 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39366 +4 4 4 4 4 4
39367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39368 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39369 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39370 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39371 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39372 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39373 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39374 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39375 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39376 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39377 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39378 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39379 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39380 +4 4 4 4 4 4
39381 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
39382 index 3473e75..c930142 100644
39383 --- a/drivers/video/udlfb.c
39384 +++ b/drivers/video/udlfb.c
39385 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
39386 dlfb_urb_completion(urb);
39387
39388 error:
39389 - atomic_add(bytes_sent, &dev->bytes_sent);
39390 - atomic_add(bytes_identical, &dev->bytes_identical);
39391 - atomic_add(width*height*2, &dev->bytes_rendered);
39392 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39393 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39394 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39395 end_cycles = get_cycles();
39396 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39397 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39398 >> 10)), /* Kcycles */
39399 &dev->cpu_kcycles_used);
39400
39401 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
39402 dlfb_urb_completion(urb);
39403
39404 error:
39405 - atomic_add(bytes_sent, &dev->bytes_sent);
39406 - atomic_add(bytes_identical, &dev->bytes_identical);
39407 - atomic_add(bytes_rendered, &dev->bytes_rendered);
39408 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39409 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39410 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39411 end_cycles = get_cycles();
39412 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39413 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39414 >> 10)), /* Kcycles */
39415 &dev->cpu_kcycles_used);
39416 }
39417 @@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
39418 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39419 struct dlfb_data *dev = fb_info->par;
39420 return snprintf(buf, PAGE_SIZE, "%u\n",
39421 - atomic_read(&dev->bytes_rendered));
39422 + atomic_read_unchecked(&dev->bytes_rendered));
39423 }
39424
39425 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39426 @@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39427 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39428 struct dlfb_data *dev = fb_info->par;
39429 return snprintf(buf, PAGE_SIZE, "%u\n",
39430 - atomic_read(&dev->bytes_identical));
39431 + atomic_read_unchecked(&dev->bytes_identical));
39432 }
39433
39434 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39435 @@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39436 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39437 struct dlfb_data *dev = fb_info->par;
39438 return snprintf(buf, PAGE_SIZE, "%u\n",
39439 - atomic_read(&dev->bytes_sent));
39440 + atomic_read_unchecked(&dev->bytes_sent));
39441 }
39442
39443 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39444 @@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39445 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39446 struct dlfb_data *dev = fb_info->par;
39447 return snprintf(buf, PAGE_SIZE, "%u\n",
39448 - atomic_read(&dev->cpu_kcycles_used));
39449 + atomic_read_unchecked(&dev->cpu_kcycles_used));
39450 }
39451
39452 static ssize_t edid_show(
39453 @@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
39454 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39455 struct dlfb_data *dev = fb_info->par;
39456
39457 - atomic_set(&dev->bytes_rendered, 0);
39458 - atomic_set(&dev->bytes_identical, 0);
39459 - atomic_set(&dev->bytes_sent, 0);
39460 - atomic_set(&dev->cpu_kcycles_used, 0);
39461 + atomic_set_unchecked(&dev->bytes_rendered, 0);
39462 + atomic_set_unchecked(&dev->bytes_identical, 0);
39463 + atomic_set_unchecked(&dev->bytes_sent, 0);
39464 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39465
39466 return count;
39467 }
39468 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
39469 index 7f8472c..9842e87 100644
39470 --- a/drivers/video/uvesafb.c
39471 +++ b/drivers/video/uvesafb.c
39472 @@ -19,6 +19,7 @@
39473 #include <linux/io.h>
39474 #include <linux/mutex.h>
39475 #include <linux/slab.h>
39476 +#include <linux/moduleloader.h>
39477 #include <video/edid.h>
39478 #include <video/uvesafb.h>
39479 #ifdef CONFIG_X86
39480 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39481 NULL,
39482 };
39483
39484 - return call_usermodehelper(v86d_path, argv, envp, 1);
39485 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39486 }
39487
39488 /*
39489 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
39490 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39491 par->pmi_setpal = par->ypan = 0;
39492 } else {
39493 +
39494 +#ifdef CONFIG_PAX_KERNEXEC
39495 +#ifdef CONFIG_MODULES
39496 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39497 +#endif
39498 + if (!par->pmi_code) {
39499 + par->pmi_setpal = par->ypan = 0;
39500 + return 0;
39501 + }
39502 +#endif
39503 +
39504 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39505 + task->t.regs.edi);
39506 +
39507 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39508 + pax_open_kernel();
39509 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39510 + pax_close_kernel();
39511 +
39512 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39513 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39514 +#else
39515 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39516 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39517 +#endif
39518 +
39519 printk(KERN_INFO "uvesafb: protected mode interface info at "
39520 "%04x:%04x\n",
39521 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39522 @@ -1821,6 +1844,11 @@ out:
39523 if (par->vbe_modes)
39524 kfree(par->vbe_modes);
39525
39526 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39527 + if (par->pmi_code)
39528 + module_free_exec(NULL, par->pmi_code);
39529 +#endif
39530 +
39531 framebuffer_release(info);
39532 return err;
39533 }
39534 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
39535 kfree(par->vbe_state_orig);
39536 if (par->vbe_state_saved)
39537 kfree(par->vbe_state_saved);
39538 +
39539 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39540 + if (par->pmi_code)
39541 + module_free_exec(NULL, par->pmi_code);
39542 +#endif
39543 +
39544 }
39545
39546 framebuffer_release(info);
39547 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
39548 index 501b340..86bd4cf 100644
39549 --- a/drivers/video/vesafb.c
39550 +++ b/drivers/video/vesafb.c
39551 @@ -9,6 +9,7 @@
39552 */
39553
39554 #include <linux/module.h>
39555 +#include <linux/moduleloader.h>
39556 #include <linux/kernel.h>
39557 #include <linux/errno.h>
39558 #include <linux/string.h>
39559 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
39560 static int vram_total __initdata; /* Set total amount of memory */
39561 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39562 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39563 -static void (*pmi_start)(void) __read_mostly;
39564 -static void (*pmi_pal) (void) __read_mostly;
39565 +static void (*pmi_start)(void) __read_only;
39566 +static void (*pmi_pal) (void) __read_only;
39567 static int depth __read_mostly;
39568 static int vga_compat __read_mostly;
39569 /* --------------------------------------------------------------------- */
39570 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
39571 unsigned int size_vmode;
39572 unsigned int size_remap;
39573 unsigned int size_total;
39574 + void *pmi_code = NULL;
39575
39576 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39577 return -ENODEV;
39578 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
39579 size_remap = size_total;
39580 vesafb_fix.smem_len = size_remap;
39581
39582 -#ifndef __i386__
39583 - screen_info.vesapm_seg = 0;
39584 -#endif
39585 -
39586 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39587 printk(KERN_WARNING
39588 "vesafb: cannot reserve video memory at 0x%lx\n",
39589 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
39590 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39591 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39592
39593 +#ifdef __i386__
39594 +
39595 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39596 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
39597 + if (!pmi_code)
39598 +#elif !defined(CONFIG_PAX_KERNEXEC)
39599 + if (0)
39600 +#endif
39601 +
39602 +#endif
39603 + screen_info.vesapm_seg = 0;
39604 +
39605 if (screen_info.vesapm_seg) {
39606 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39607 - screen_info.vesapm_seg,screen_info.vesapm_off);
39608 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39609 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39610 }
39611
39612 if (screen_info.vesapm_seg < 0xc000)
39613 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
39614
39615 if (ypan || pmi_setpal) {
39616 unsigned short *pmi_base;
39617 +
39618 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39619 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39620 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39621 +
39622 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39623 + pax_open_kernel();
39624 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39625 +#else
39626 + pmi_code = pmi_base;
39627 +#endif
39628 +
39629 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39630 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39631 +
39632 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39633 + pmi_start = ktva_ktla(pmi_start);
39634 + pmi_pal = ktva_ktla(pmi_pal);
39635 + pax_close_kernel();
39636 +#endif
39637 +
39638 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39639 if (pmi_base[3]) {
39640 printk(KERN_INFO "vesafb: pmi: ports = ");
39641 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
39642 info->node, info->fix.id);
39643 return 0;
39644 err:
39645 +
39646 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39647 + module_free_exec(NULL, pmi_code);
39648 +#endif
39649 +
39650 if (info->screen_base)
39651 iounmap(info->screen_base);
39652 framebuffer_release(info);
39653 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
39654 index 88714ae..16c2e11 100644
39655 --- a/drivers/video/via/via_clock.h
39656 +++ b/drivers/video/via/via_clock.h
39657 @@ -56,7 +56,7 @@ struct via_clock {
39658
39659 void (*set_engine_pll_state)(u8 state);
39660 void (*set_engine_pll)(struct via_pll_config config);
39661 -};
39662 +} __no_const;
39663
39664
39665 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39666 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
39667 index e56c934..fc22f4b 100644
39668 --- a/drivers/xen/xen-pciback/conf_space.h
39669 +++ b/drivers/xen/xen-pciback/conf_space.h
39670 @@ -44,15 +44,15 @@ struct config_field {
39671 struct {
39672 conf_dword_write write;
39673 conf_dword_read read;
39674 - } dw;
39675 + } __no_const dw;
39676 struct {
39677 conf_word_write write;
39678 conf_word_read read;
39679 - } w;
39680 + } __no_const w;
39681 struct {
39682 conf_byte_write write;
39683 conf_byte_read read;
39684 - } b;
39685 + } __no_const b;
39686 } u;
39687 struct list_head list;
39688 };
39689 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
39690 index 879ed88..bc03a01 100644
39691 --- a/fs/9p/vfs_inode.c
39692 +++ b/fs/9p/vfs_inode.c
39693 @@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
39694 void
39695 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39696 {
39697 - char *s = nd_get_link(nd);
39698 + const char *s = nd_get_link(nd);
39699
39700 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39701 IS_ERR(s) ? "<error>" : s);
39702 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
39703 index 79e2ca7..5828ad1 100644
39704 --- a/fs/Kconfig.binfmt
39705 +++ b/fs/Kconfig.binfmt
39706 @@ -86,7 +86,7 @@ config HAVE_AOUT
39707
39708 config BINFMT_AOUT
39709 tristate "Kernel support for a.out and ECOFF binaries"
39710 - depends on HAVE_AOUT
39711 + depends on HAVE_AOUT && BROKEN
39712 ---help---
39713 A.out (Assembler.OUTput) is a set of formats for libraries and
39714 executables used in the earliest versions of UNIX. Linux used
39715 diff --git a/fs/aio.c b/fs/aio.c
39716 index 969beb0..09fab51 100644
39717 --- a/fs/aio.c
39718 +++ b/fs/aio.c
39719 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
39720 size += sizeof(struct io_event) * nr_events;
39721 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39722
39723 - if (nr_pages < 0)
39724 + if (nr_pages <= 0)
39725 return -EINVAL;
39726
39727 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39728 @@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
39729 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39730 {
39731 ssize_t ret;
39732 + struct iovec iovstack;
39733
39734 #ifdef CONFIG_COMPAT
39735 if (compat)
39736 ret = compat_rw_copy_check_uvector(type,
39737 (struct compat_iovec __user *)kiocb->ki_buf,
39738 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39739 + kiocb->ki_nbytes, 1, &iovstack,
39740 &kiocb->ki_iovec, 1);
39741 else
39742 #endif
39743 ret = rw_copy_check_uvector(type,
39744 (struct iovec __user *)kiocb->ki_buf,
39745 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39746 + kiocb->ki_nbytes, 1, &iovstack,
39747 &kiocb->ki_iovec, 1);
39748 if (ret < 0)
39749 goto out;
39750
39751 + if (kiocb->ki_iovec == &iovstack) {
39752 + kiocb->ki_inline_vec = iovstack;
39753 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
39754 + }
39755 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39756 kiocb->ki_cur_seg = 0;
39757 /* ki_nbytes/left now reflect bytes instead of segs */
39758 diff --git a/fs/attr.c b/fs/attr.c
39759 index 7ee7ba4..0c61a60 100644
39760 --- a/fs/attr.c
39761 +++ b/fs/attr.c
39762 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
39763 unsigned long limit;
39764
39765 limit = rlimit(RLIMIT_FSIZE);
39766 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39767 if (limit != RLIM_INFINITY && offset > limit)
39768 goto out_sig;
39769 if (offset > inode->i_sb->s_maxbytes)
39770 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
39771 index e1fbdee..cd5ea56 100644
39772 --- a/fs/autofs4/waitq.c
39773 +++ b/fs/autofs4/waitq.c
39774 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
39775 {
39776 unsigned long sigpipe, flags;
39777 mm_segment_t fs;
39778 - const char *data = (const char *)addr;
39779 + const char __user *data = (const char __force_user *)addr;
39780 ssize_t wr = 0;
39781
39782 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39783 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
39784 index 8342ca6..82fd192 100644
39785 --- a/fs/befs/linuxvfs.c
39786 +++ b/fs/befs/linuxvfs.c
39787 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39788 {
39789 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39790 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39791 - char *link = nd_get_link(nd);
39792 + const char *link = nd_get_link(nd);
39793 if (!IS_ERR(link))
39794 kfree(link);
39795 }
39796 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
39797 index a6395bd..a5b24c4 100644
39798 --- a/fs/binfmt_aout.c
39799 +++ b/fs/binfmt_aout.c
39800 @@ -16,6 +16,7 @@
39801 #include <linux/string.h>
39802 #include <linux/fs.h>
39803 #include <linux/file.h>
39804 +#include <linux/security.h>
39805 #include <linux/stat.h>
39806 #include <linux/fcntl.h>
39807 #include <linux/ptrace.h>
39808 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
39809 #endif
39810 # define START_STACK(u) ((void __user *)u.start_stack)
39811
39812 + memset(&dump, 0, sizeof(dump));
39813 +
39814 fs = get_fs();
39815 set_fs(KERNEL_DS);
39816 has_dumped = 1;
39817 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
39818
39819 /* If the size of the dump file exceeds the rlimit, then see what would happen
39820 if we wrote the stack, but not the data area. */
39821 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39822 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39823 dump.u_dsize = 0;
39824
39825 /* Make sure we have enough room to write the stack and data areas. */
39826 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39827 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39828 dump.u_ssize = 0;
39829
39830 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39831 rlim = rlimit(RLIMIT_DATA);
39832 if (rlim >= RLIM_INFINITY)
39833 rlim = ~0;
39834 +
39835 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39836 if (ex.a_data + ex.a_bss > rlim)
39837 return -ENOMEM;
39838
39839 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39840 install_exec_creds(bprm);
39841 current->flags &= ~PF_FORKNOEXEC;
39842
39843 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39844 + current->mm->pax_flags = 0UL;
39845 +#endif
39846 +
39847 +#ifdef CONFIG_PAX_PAGEEXEC
39848 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39849 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39850 +
39851 +#ifdef CONFIG_PAX_EMUTRAMP
39852 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39853 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39854 +#endif
39855 +
39856 +#ifdef CONFIG_PAX_MPROTECT
39857 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39858 + current->mm->pax_flags |= MF_PAX_MPROTECT;
39859 +#endif
39860 +
39861 + }
39862 +#endif
39863 +
39864 if (N_MAGIC(ex) == OMAGIC) {
39865 unsigned long text_addr, map_size;
39866 loff_t pos;
39867 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39868
39869 down_write(&current->mm->mmap_sem);
39870 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39871 - PROT_READ | PROT_WRITE | PROT_EXEC,
39872 + PROT_READ | PROT_WRITE,
39873 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39874 fd_offset + ex.a_text);
39875 up_write(&current->mm->mmap_sem);
39876 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
39877 index 21ac5ee..31d14e9 100644
39878 --- a/fs/binfmt_elf.c
39879 +++ b/fs/binfmt_elf.c
39880 @@ -32,6 +32,7 @@
39881 #include <linux/elf.h>
39882 #include <linux/utsname.h>
39883 #include <linux/coredump.h>
39884 +#include <linux/xattr.h>
39885 #include <asm/uaccess.h>
39886 #include <asm/param.h>
39887 #include <asm/page.h>
39888 @@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
39889 #define elf_core_dump NULL
39890 #endif
39891
39892 +#ifdef CONFIG_PAX_MPROTECT
39893 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39894 +#endif
39895 +
39896 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39897 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39898 #else
39899 @@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
39900 .load_binary = load_elf_binary,
39901 .load_shlib = load_elf_library,
39902 .core_dump = elf_core_dump,
39903 +
39904 +#ifdef CONFIG_PAX_MPROTECT
39905 + .handle_mprotect= elf_handle_mprotect,
39906 +#endif
39907 +
39908 .min_coredump = ELF_EXEC_PAGESIZE,
39909 };
39910
39911 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
39912
39913 static int set_brk(unsigned long start, unsigned long end)
39914 {
39915 + unsigned long e = end;
39916 +
39917 start = ELF_PAGEALIGN(start);
39918 end = ELF_PAGEALIGN(end);
39919 if (end > start) {
39920 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
39921 if (BAD_ADDR(addr))
39922 return addr;
39923 }
39924 - current->mm->start_brk = current->mm->brk = end;
39925 + current->mm->start_brk = current->mm->brk = e;
39926 return 0;
39927 }
39928
39929 @@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39930 elf_addr_t __user *u_rand_bytes;
39931 const char *k_platform = ELF_PLATFORM;
39932 const char *k_base_platform = ELF_BASE_PLATFORM;
39933 - unsigned char k_rand_bytes[16];
39934 + u32 k_rand_bytes[4];
39935 int items;
39936 elf_addr_t *elf_info;
39937 int ei_index = 0;
39938 const struct cred *cred = current_cred();
39939 struct vm_area_struct *vma;
39940 + unsigned long saved_auxv[AT_VECTOR_SIZE];
39941
39942 /*
39943 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39944 @@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39945 * Generate 16 random bytes for userspace PRNG seeding.
39946 */
39947 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39948 - u_rand_bytes = (elf_addr_t __user *)
39949 - STACK_ALLOC(p, sizeof(k_rand_bytes));
39950 + srandom32(k_rand_bytes[0] ^ random32());
39951 + srandom32(k_rand_bytes[1] ^ random32());
39952 + srandom32(k_rand_bytes[2] ^ random32());
39953 + srandom32(k_rand_bytes[3] ^ random32());
39954 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
39955 + u_rand_bytes = (elf_addr_t __user *) p;
39956 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39957 return -EFAULT;
39958
39959 @@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39960 return -EFAULT;
39961 current->mm->env_end = p;
39962
39963 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39964 +
39965 /* Put the elf_info on the stack in the right place. */
39966 sp = (elf_addr_t __user *)envp + 1;
39967 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39968 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39969 return -EFAULT;
39970 return 0;
39971 }
39972 @@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39973 {
39974 struct elf_phdr *elf_phdata;
39975 struct elf_phdr *eppnt;
39976 - unsigned long load_addr = 0;
39977 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39978 int load_addr_set = 0;
39979 unsigned long last_bss = 0, elf_bss = 0;
39980 - unsigned long error = ~0UL;
39981 + unsigned long error = -EINVAL;
39982 unsigned long total_size;
39983 int retval, i, size;
39984
39985 @@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39986 goto out_close;
39987 }
39988
39989 +#ifdef CONFIG_PAX_SEGMEXEC
39990 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39991 + pax_task_size = SEGMEXEC_TASK_SIZE;
39992 +#endif
39993 +
39994 eppnt = elf_phdata;
39995 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39996 if (eppnt->p_type == PT_LOAD) {
39997 @@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39998 k = load_addr + eppnt->p_vaddr;
39999 if (BAD_ADDR(k) ||
40000 eppnt->p_filesz > eppnt->p_memsz ||
40001 - eppnt->p_memsz > TASK_SIZE ||
40002 - TASK_SIZE - eppnt->p_memsz < k) {
40003 + eppnt->p_memsz > pax_task_size ||
40004 + pax_task_size - eppnt->p_memsz < k) {
40005 error = -ENOMEM;
40006 goto out_close;
40007 }
40008 @@ -528,6 +552,351 @@ out:
40009 return error;
40010 }
40011
40012 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
40013 +{
40014 + unsigned long pax_flags = 0UL;
40015 +
40016 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
40017 +
40018 +#ifdef CONFIG_PAX_PAGEEXEC
40019 + if (elf_phdata->p_flags & PF_PAGEEXEC)
40020 + pax_flags |= MF_PAX_PAGEEXEC;
40021 +#endif
40022 +
40023 +#ifdef CONFIG_PAX_SEGMEXEC
40024 + if (elf_phdata->p_flags & PF_SEGMEXEC)
40025 + pax_flags |= MF_PAX_SEGMEXEC;
40026 +#endif
40027 +
40028 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40029 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40030 + if ((__supported_pte_mask & _PAGE_NX))
40031 + pax_flags &= ~MF_PAX_SEGMEXEC;
40032 + else
40033 + pax_flags &= ~MF_PAX_PAGEEXEC;
40034 + }
40035 +#endif
40036 +
40037 +#ifdef CONFIG_PAX_EMUTRAMP
40038 + if (elf_phdata->p_flags & PF_EMUTRAMP)
40039 + pax_flags |= MF_PAX_EMUTRAMP;
40040 +#endif
40041 +
40042 +#ifdef CONFIG_PAX_MPROTECT
40043 + if (elf_phdata->p_flags & PF_MPROTECT)
40044 + pax_flags |= MF_PAX_MPROTECT;
40045 +#endif
40046 +
40047 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40048 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
40049 + pax_flags |= MF_PAX_RANDMMAP;
40050 +#endif
40051 +
40052 +#endif
40053 +
40054 + return pax_flags;
40055 +}
40056 +
40057 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
40058 +{
40059 + unsigned long pax_flags = 0UL;
40060 +
40061 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
40062 +
40063 +#ifdef CONFIG_PAX_PAGEEXEC
40064 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
40065 + pax_flags |= MF_PAX_PAGEEXEC;
40066 +#endif
40067 +
40068 +#ifdef CONFIG_PAX_SEGMEXEC
40069 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
40070 + pax_flags |= MF_PAX_SEGMEXEC;
40071 +#endif
40072 +
40073 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40074 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40075 + if ((__supported_pte_mask & _PAGE_NX))
40076 + pax_flags &= ~MF_PAX_SEGMEXEC;
40077 + else
40078 + pax_flags &= ~MF_PAX_PAGEEXEC;
40079 + }
40080 +#endif
40081 +
40082 +#ifdef CONFIG_PAX_EMUTRAMP
40083 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
40084 + pax_flags |= MF_PAX_EMUTRAMP;
40085 +#endif
40086 +
40087 +#ifdef CONFIG_PAX_MPROTECT
40088 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
40089 + pax_flags |= MF_PAX_MPROTECT;
40090 +#endif
40091 +
40092 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40093 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
40094 + pax_flags |= MF_PAX_RANDMMAP;
40095 +#endif
40096 +
40097 +#endif
40098 +
40099 + return pax_flags;
40100 +}
40101 +
40102 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
40103 +{
40104 + unsigned long pax_flags = 0UL;
40105 +
40106 +#ifdef CONFIG_PAX_EI_PAX
40107 +
40108 +#ifdef CONFIG_PAX_PAGEEXEC
40109 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
40110 + pax_flags |= MF_PAX_PAGEEXEC;
40111 +#endif
40112 +
40113 +#ifdef CONFIG_PAX_SEGMEXEC
40114 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
40115 + pax_flags |= MF_PAX_SEGMEXEC;
40116 +#endif
40117 +
40118 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40119 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40120 + if ((__supported_pte_mask & _PAGE_NX))
40121 + pax_flags &= ~MF_PAX_SEGMEXEC;
40122 + else
40123 + pax_flags &= ~MF_PAX_PAGEEXEC;
40124 + }
40125 +#endif
40126 +
40127 +#ifdef CONFIG_PAX_EMUTRAMP
40128 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
40129 + pax_flags |= MF_PAX_EMUTRAMP;
40130 +#endif
40131 +
40132 +#ifdef CONFIG_PAX_MPROTECT
40133 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
40134 + pax_flags |= MF_PAX_MPROTECT;
40135 +#endif
40136 +
40137 +#ifdef CONFIG_PAX_ASLR
40138 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
40139 + pax_flags |= MF_PAX_RANDMMAP;
40140 +#endif
40141 +
40142 +#else
40143 +
40144 +#ifdef CONFIG_PAX_PAGEEXEC
40145 + pax_flags |= MF_PAX_PAGEEXEC;
40146 +#endif
40147 +
40148 +#ifdef CONFIG_PAX_MPROTECT
40149 + pax_flags |= MF_PAX_MPROTECT;
40150 +#endif
40151 +
40152 +#ifdef CONFIG_PAX_RANDMMAP
40153 + pax_flags |= MF_PAX_RANDMMAP;
40154 +#endif
40155 +
40156 +#ifdef CONFIG_PAX_SEGMEXEC
40157 + if (!(__supported_pte_mask & _PAGE_NX)) {
40158 + pax_flags &= ~MF_PAX_PAGEEXEC;
40159 + pax_flags |= MF_PAX_SEGMEXEC;
40160 + }
40161 +#endif
40162 +
40163 +#endif
40164 +
40165 + return pax_flags;
40166 +}
40167 +
40168 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
40169 +{
40170 +
40171 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
40172 + unsigned long i;
40173 +
40174 + for (i = 0UL; i < elf_ex->e_phnum; i++)
40175 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
40176 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
40177 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
40178 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
40179 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
40180 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
40181 + return ~0UL;
40182 +
40183 +#ifdef CONFIG_PAX_SOFTMODE
40184 + if (pax_softmode)
40185 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
40186 + else
40187 +#endif
40188 +
40189 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
40190 + break;
40191 + }
40192 +#endif
40193 +
40194 + return ~0UL;
40195 +}
40196 +
40197 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
40198 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
40199 +{
40200 + unsigned long pax_flags = 0UL;
40201 +
40202 +#ifdef CONFIG_PAX_PAGEEXEC
40203 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
40204 + pax_flags |= MF_PAX_PAGEEXEC;
40205 +#endif
40206 +
40207 +#ifdef CONFIG_PAX_SEGMEXEC
40208 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
40209 + pax_flags |= MF_PAX_SEGMEXEC;
40210 +#endif
40211 +
40212 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40213 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40214 + if ((__supported_pte_mask & _PAGE_NX))
40215 + pax_flags &= ~MF_PAX_SEGMEXEC;
40216 + else
40217 + pax_flags &= ~MF_PAX_PAGEEXEC;
40218 + }
40219 +#endif
40220 +
40221 +#ifdef CONFIG_PAX_EMUTRAMP
40222 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
40223 + pax_flags |= MF_PAX_EMUTRAMP;
40224 +#endif
40225 +
40226 +#ifdef CONFIG_PAX_MPROTECT
40227 + if (pax_flags_softmode & MF_PAX_MPROTECT)
40228 + pax_flags |= MF_PAX_MPROTECT;
40229 +#endif
40230 +
40231 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40232 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
40233 + pax_flags |= MF_PAX_RANDMMAP;
40234 +#endif
40235 +
40236 + return pax_flags;
40237 +}
40238 +
40239 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
40240 +{
40241 + unsigned long pax_flags = 0UL;
40242 +
40243 +#ifdef CONFIG_PAX_PAGEEXEC
40244 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
40245 + pax_flags |= MF_PAX_PAGEEXEC;
40246 +#endif
40247 +
40248 +#ifdef CONFIG_PAX_SEGMEXEC
40249 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
40250 + pax_flags |= MF_PAX_SEGMEXEC;
40251 +#endif
40252 +
40253 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40254 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40255 + if ((__supported_pte_mask & _PAGE_NX))
40256 + pax_flags &= ~MF_PAX_SEGMEXEC;
40257 + else
40258 + pax_flags &= ~MF_PAX_PAGEEXEC;
40259 + }
40260 +#endif
40261 +
40262 +#ifdef CONFIG_PAX_EMUTRAMP
40263 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
40264 + pax_flags |= MF_PAX_EMUTRAMP;
40265 +#endif
40266 +
40267 +#ifdef CONFIG_PAX_MPROTECT
40268 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
40269 + pax_flags |= MF_PAX_MPROTECT;
40270 +#endif
40271 +
40272 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40273 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
40274 + pax_flags |= MF_PAX_RANDMMAP;
40275 +#endif
40276 +
40277 + return pax_flags;
40278 +}
40279 +#endif
40280 +
40281 +static unsigned long pax_parse_xattr_pax(struct file * const file)
40282 +{
40283 +
40284 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
40285 + ssize_t xattr_size, i;
40286 + unsigned char xattr_value[5];
40287 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
40288 +
40289 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
40290 + if (xattr_size <= 0)
40291 + return ~0UL;
40292 +
40293 + for (i = 0; i < xattr_size; i++)
40294 + switch (xattr_value[i]) {
40295 + default:
40296 + return ~0UL;
40297 +
40298 +#define parse_flag(option1, option2, flag) \
40299 + case option1: \
40300 + pax_flags_hardmode |= MF_PAX_##flag; \
40301 + break; \
40302 + case option2: \
40303 + pax_flags_softmode |= MF_PAX_##flag; \
40304 + break;
40305 +
40306 + parse_flag('p', 'P', PAGEEXEC);
40307 + parse_flag('e', 'E', EMUTRAMP);
40308 + parse_flag('m', 'M', MPROTECT);
40309 + parse_flag('r', 'R', RANDMMAP);
40310 + parse_flag('s', 'S', SEGMEXEC);
40311 +
40312 +#undef parse_flag
40313 + }
40314 +
40315 + if (pax_flags_hardmode & pax_flags_softmode)
40316 + return ~0UL;
40317 +
40318 +#ifdef CONFIG_PAX_SOFTMODE
40319 + if (pax_softmode)
40320 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
40321 + else
40322 +#endif
40323 +
40324 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
40325 +#else
40326 + return ~0UL;
40327 +#endif
40328 +
40329 +}
40330 +
40331 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40332 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
40333 +{
40334 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
40335 +
40336 + pax_flags = pax_parse_ei_pax(elf_ex);
40337 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
40338 + xattr_pax_flags = pax_parse_xattr_pax(file);
40339 +
40340 + if (pt_pax_flags == ~0UL)
40341 + pt_pax_flags = xattr_pax_flags;
40342 + else if (xattr_pax_flags == ~0UL)
40343 + xattr_pax_flags = pt_pax_flags;
40344 + if (pt_pax_flags != xattr_pax_flags)
40345 + return -EINVAL;
40346 + if (pt_pax_flags != ~0UL)
40347 + pax_flags = pt_pax_flags;
40348 +
40349 + if (0 > pax_check_flags(&pax_flags))
40350 + return -EINVAL;
40351 +
40352 + current->mm->pax_flags = pax_flags;
40353 + return 0;
40354 +}
40355 +#endif
40356 +
40357 /*
40358 * These are the functions used to load ELF style executables and shared
40359 * libraries. There is no binary dependent code anywhere else.
40360 @@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
40361 {
40362 unsigned int random_variable = 0;
40363
40364 +#ifdef CONFIG_PAX_RANDUSTACK
40365 + if (randomize_va_space)
40366 + return stack_top - current->mm->delta_stack;
40367 +#endif
40368 +
40369 if ((current->flags & PF_RANDOMIZE) &&
40370 !(current->personality & ADDR_NO_RANDOMIZE)) {
40371 random_variable = get_random_int() & STACK_RND_MASK;
40372 @@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40373 unsigned long load_addr = 0, load_bias = 0;
40374 int load_addr_set = 0;
40375 char * elf_interpreter = NULL;
40376 - unsigned long error;
40377 + unsigned long error = 0;
40378 struct elf_phdr *elf_ppnt, *elf_phdata;
40379 unsigned long elf_bss, elf_brk;
40380 int retval, i;
40381 @@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40382 unsigned long start_code, end_code, start_data, end_data;
40383 unsigned long reloc_func_desc __maybe_unused = 0;
40384 int executable_stack = EXSTACK_DEFAULT;
40385 - unsigned long def_flags = 0;
40386 struct {
40387 struct elfhdr elf_ex;
40388 struct elfhdr interp_elf_ex;
40389 } *loc;
40390 + unsigned long pax_task_size = TASK_SIZE;
40391
40392 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40393 if (!loc) {
40394 @@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40395
40396 /* OK, This is the point of no return */
40397 current->flags &= ~PF_FORKNOEXEC;
40398 - current->mm->def_flags = def_flags;
40399 +
40400 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40401 + current->mm->pax_flags = 0UL;
40402 +#endif
40403 +
40404 +#ifdef CONFIG_PAX_DLRESOLVE
40405 + current->mm->call_dl_resolve = 0UL;
40406 +#endif
40407 +
40408 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40409 + current->mm->call_syscall = 0UL;
40410 +#endif
40411 +
40412 +#ifdef CONFIG_PAX_ASLR
40413 + current->mm->delta_mmap = 0UL;
40414 + current->mm->delta_stack = 0UL;
40415 +#endif
40416 +
40417 + current->mm->def_flags = 0;
40418 +
40419 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40420 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
40421 + send_sig(SIGKILL, current, 0);
40422 + goto out_free_dentry;
40423 + }
40424 +#endif
40425 +
40426 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40427 + pax_set_initial_flags(bprm);
40428 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40429 + if (pax_set_initial_flags_func)
40430 + (pax_set_initial_flags_func)(bprm);
40431 +#endif
40432 +
40433 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40434 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40435 + current->mm->context.user_cs_limit = PAGE_SIZE;
40436 + current->mm->def_flags |= VM_PAGEEXEC;
40437 + }
40438 +#endif
40439 +
40440 +#ifdef CONFIG_PAX_SEGMEXEC
40441 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40442 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40443 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40444 + pax_task_size = SEGMEXEC_TASK_SIZE;
40445 + current->mm->def_flags |= VM_NOHUGEPAGE;
40446 + }
40447 +#endif
40448 +
40449 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40450 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40451 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40452 + put_cpu();
40453 + }
40454 +#endif
40455
40456 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40457 may depend on the personality. */
40458 SET_PERSONALITY(loc->elf_ex);
40459 +
40460 +#ifdef CONFIG_PAX_ASLR
40461 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40462 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40463 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40464 + }
40465 +#endif
40466 +
40467 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40468 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40469 + executable_stack = EXSTACK_DISABLE_X;
40470 + current->personality &= ~READ_IMPLIES_EXEC;
40471 + } else
40472 +#endif
40473 +
40474 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40475 current->personality |= READ_IMPLIES_EXEC;
40476
40477 @@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40478 #else
40479 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40480 #endif
40481 +
40482 +#ifdef CONFIG_PAX_RANDMMAP
40483 + /* PaX: randomize base address at the default exe base if requested */
40484 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40485 +#ifdef CONFIG_SPARC64
40486 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40487 +#else
40488 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40489 +#endif
40490 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40491 + elf_flags |= MAP_FIXED;
40492 + }
40493 +#endif
40494 +
40495 }
40496
40497 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40498 @@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40499 * allowed task size. Note that p_filesz must always be
40500 * <= p_memsz so it is only necessary to check p_memsz.
40501 */
40502 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40503 - elf_ppnt->p_memsz > TASK_SIZE ||
40504 - TASK_SIZE - elf_ppnt->p_memsz < k) {
40505 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40506 + elf_ppnt->p_memsz > pax_task_size ||
40507 + pax_task_size - elf_ppnt->p_memsz < k) {
40508 /* set_brk can never work. Avoid overflows. */
40509 send_sig(SIGKILL, current, 0);
40510 retval = -EINVAL;
40511 @@ -870,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40512 start_data += load_bias;
40513 end_data += load_bias;
40514
40515 +#ifdef CONFIG_PAX_RANDMMAP
40516 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40517 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40518 +#endif
40519 +
40520 /* Calling set_brk effectively mmaps the pages that we need
40521 * for the bss and break sections. We must do this before
40522 * mapping in the interpreter, to make sure it doesn't wind
40523 @@ -881,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40524 goto out_free_dentry;
40525 }
40526 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40527 - send_sig(SIGSEGV, current, 0);
40528 - retval = -EFAULT; /* Nobody gets to see this, but.. */
40529 - goto out_free_dentry;
40530 + /*
40531 + * This bss-zeroing can fail if the ELF
40532 + * file specifies odd protections. So
40533 + * we don't check the return value
40534 + */
40535 }
40536
40537 if (elf_interpreter) {
40538 @@ -1098,7 +1563,7 @@ out:
40539 * Decide what to dump of a segment, part, all or none.
40540 */
40541 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40542 - unsigned long mm_flags)
40543 + unsigned long mm_flags, long signr)
40544 {
40545 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40546
40547 @@ -1132,7 +1597,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
40548 if (vma->vm_file == NULL)
40549 return 0;
40550
40551 - if (FILTER(MAPPED_PRIVATE))
40552 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40553 goto whole;
40554
40555 /*
40556 @@ -1354,9 +1819,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
40557 {
40558 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40559 int i = 0;
40560 - do
40561 + do {
40562 i += 2;
40563 - while (auxv[i - 2] != AT_NULL);
40564 + } while (auxv[i - 2] != AT_NULL);
40565 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40566 }
40567
40568 @@ -1862,14 +2327,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
40569 }
40570
40571 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40572 - unsigned long mm_flags)
40573 + struct coredump_params *cprm)
40574 {
40575 struct vm_area_struct *vma;
40576 size_t size = 0;
40577
40578 for (vma = first_vma(current, gate_vma); vma != NULL;
40579 vma = next_vma(vma, gate_vma))
40580 - size += vma_dump_size(vma, mm_flags);
40581 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40582 return size;
40583 }
40584
40585 @@ -1963,7 +2428,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40586
40587 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40588
40589 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40590 + offset += elf_core_vma_data_size(gate_vma, cprm);
40591 offset += elf_core_extra_data_size();
40592 e_shoff = offset;
40593
40594 @@ -1977,10 +2442,12 @@ static int elf_core_dump(struct coredump_params *cprm)
40595 offset = dataoff;
40596
40597 size += sizeof(*elf);
40598 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40599 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40600 goto end_coredump;
40601
40602 size += sizeof(*phdr4note);
40603 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40604 if (size > cprm->limit
40605 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40606 goto end_coredump;
40607 @@ -1994,7 +2461,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40608 phdr.p_offset = offset;
40609 phdr.p_vaddr = vma->vm_start;
40610 phdr.p_paddr = 0;
40611 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40612 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40613 phdr.p_memsz = vma->vm_end - vma->vm_start;
40614 offset += phdr.p_filesz;
40615 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40616 @@ -2005,6 +2472,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40617 phdr.p_align = ELF_EXEC_PAGESIZE;
40618
40619 size += sizeof(phdr);
40620 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40621 if (size > cprm->limit
40622 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40623 goto end_coredump;
40624 @@ -2029,7 +2497,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40625 unsigned long addr;
40626 unsigned long end;
40627
40628 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40629 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40630
40631 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40632 struct page *page;
40633 @@ -2038,6 +2506,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40634 page = get_dump_page(addr);
40635 if (page) {
40636 void *kaddr = kmap(page);
40637 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40638 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40639 !dump_write(cprm->file, kaddr,
40640 PAGE_SIZE);
40641 @@ -2055,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40642
40643 if (e_phnum == PN_XNUM) {
40644 size += sizeof(*shdr4extnum);
40645 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40646 if (size > cprm->limit
40647 || !dump_write(cprm->file, shdr4extnum,
40648 sizeof(*shdr4extnum)))
40649 @@ -2075,6 +2545,97 @@ out:
40650
40651 #endif /* CONFIG_ELF_CORE */
40652
40653 +#ifdef CONFIG_PAX_MPROTECT
40654 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
40655 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40656 + * we'll remove VM_MAYWRITE for good on RELRO segments.
40657 + *
40658 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40659 + * basis because we want to allow the common case and not the special ones.
40660 + */
40661 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40662 +{
40663 + struct elfhdr elf_h;
40664 + struct elf_phdr elf_p;
40665 + unsigned long i;
40666 + unsigned long oldflags;
40667 + bool is_textrel_rw, is_textrel_rx, is_relro;
40668 +
40669 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40670 + return;
40671 +
40672 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40673 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40674 +
40675 +#ifdef CONFIG_PAX_ELFRELOCS
40676 + /* possible TEXTREL */
40677 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40678 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40679 +#else
40680 + is_textrel_rw = false;
40681 + is_textrel_rx = false;
40682 +#endif
40683 +
40684 + /* possible RELRO */
40685 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40686 +
40687 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40688 + return;
40689 +
40690 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40691 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40692 +
40693 +#ifdef CONFIG_PAX_ETEXECRELOCS
40694 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40695 +#else
40696 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40697 +#endif
40698 +
40699 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40700 + !elf_check_arch(&elf_h) ||
40701 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40702 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40703 + return;
40704 +
40705 + for (i = 0UL; i < elf_h.e_phnum; i++) {
40706 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40707 + return;
40708 + switch (elf_p.p_type) {
40709 + case PT_DYNAMIC:
40710 + if (!is_textrel_rw && !is_textrel_rx)
40711 + continue;
40712 + i = 0UL;
40713 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40714 + elf_dyn dyn;
40715 +
40716 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40717 + return;
40718 + if (dyn.d_tag == DT_NULL)
40719 + return;
40720 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40721 + gr_log_textrel(vma);
40722 + if (is_textrel_rw)
40723 + vma->vm_flags |= VM_MAYWRITE;
40724 + else
40725 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40726 + vma->vm_flags &= ~VM_MAYWRITE;
40727 + return;
40728 + }
40729 + i++;
40730 + }
40731 + return;
40732 +
40733 + case PT_GNU_RELRO:
40734 + if (!is_relro)
40735 + continue;
40736 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40737 + vma->vm_flags &= ~VM_MAYWRITE;
40738 + return;
40739 + }
40740 + }
40741 +}
40742 +#endif
40743 +
40744 static int __init init_elf_binfmt(void)
40745 {
40746 return register_binfmt(&elf_format);
40747 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
40748 index 1bffbe0..c8c283e 100644
40749 --- a/fs/binfmt_flat.c
40750 +++ b/fs/binfmt_flat.c
40751 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
40752 realdatastart = (unsigned long) -ENOMEM;
40753 printk("Unable to allocate RAM for process data, errno %d\n",
40754 (int)-realdatastart);
40755 + down_write(&current->mm->mmap_sem);
40756 do_munmap(current->mm, textpos, text_len);
40757 + up_write(&current->mm->mmap_sem);
40758 ret = realdatastart;
40759 goto err;
40760 }
40761 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40762 }
40763 if (IS_ERR_VALUE(result)) {
40764 printk("Unable to read data+bss, errno %d\n", (int)-result);
40765 + down_write(&current->mm->mmap_sem);
40766 do_munmap(current->mm, textpos, text_len);
40767 do_munmap(current->mm, realdatastart, len);
40768 + up_write(&current->mm->mmap_sem);
40769 ret = result;
40770 goto err;
40771 }
40772 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40773 }
40774 if (IS_ERR_VALUE(result)) {
40775 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40776 + down_write(&current->mm->mmap_sem);
40777 do_munmap(current->mm, textpos, text_len + data_len + extra +
40778 MAX_SHARED_LIBS * sizeof(unsigned long));
40779 + up_write(&current->mm->mmap_sem);
40780 ret = result;
40781 goto err;
40782 }
40783 diff --git a/fs/bio.c b/fs/bio.c
40784 index b1fe82c..84da0a9 100644
40785 --- a/fs/bio.c
40786 +++ b/fs/bio.c
40787 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
40788 const int read = bio_data_dir(bio) == READ;
40789 struct bio_map_data *bmd = bio->bi_private;
40790 int i;
40791 - char *p = bmd->sgvecs[0].iov_base;
40792 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40793
40794 __bio_for_each_segment(bvec, bio, i, 0) {
40795 char *addr = page_address(bvec->bv_page);
40796 diff --git a/fs/block_dev.c b/fs/block_dev.c
40797 index b07f1da..9efcb92 100644
40798 --- a/fs/block_dev.c
40799 +++ b/fs/block_dev.c
40800 @@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
40801 else if (bdev->bd_contains == bdev)
40802 return true; /* is a whole device which isn't held */
40803
40804 - else if (whole->bd_holder == bd_may_claim)
40805 + else if (whole->bd_holder == (void *)bd_may_claim)
40806 return true; /* is a partition of a device that is being partitioned */
40807 else if (whole->bd_holder != NULL)
40808 return false; /* is a partition of a held device */
40809 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
40810 index dede441..f2a2507 100644
40811 --- a/fs/btrfs/ctree.c
40812 +++ b/fs/btrfs/ctree.c
40813 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
40814 free_extent_buffer(buf);
40815 add_root_to_dirty_list(root);
40816 } else {
40817 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40818 - parent_start = parent->start;
40819 - else
40820 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40821 + if (parent)
40822 + parent_start = parent->start;
40823 + else
40824 + parent_start = 0;
40825 + } else
40826 parent_start = 0;
40827
40828 WARN_ON(trans->transid != btrfs_header_generation(parent));
40829 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
40830 index fd1a06d..6e9033d 100644
40831 --- a/fs/btrfs/inode.c
40832 +++ b/fs/btrfs/inode.c
40833 @@ -6895,7 +6895,7 @@ fail:
40834 return -ENOMEM;
40835 }
40836
40837 -static int btrfs_getattr(struct vfsmount *mnt,
40838 +int btrfs_getattr(struct vfsmount *mnt,
40839 struct dentry *dentry, struct kstat *stat)
40840 {
40841 struct inode *inode = dentry->d_inode;
40842 @@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
40843 return 0;
40844 }
40845
40846 +EXPORT_SYMBOL(btrfs_getattr);
40847 +
40848 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
40849 +{
40850 + return BTRFS_I(inode)->root->anon_dev;
40851 +}
40852 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40853 +
40854 /*
40855 * If a file is moved, it will inherit the cow and compression flags of the new
40856 * directory.
40857 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
40858 index c04f02c..f5c9e2e 100644
40859 --- a/fs/btrfs/ioctl.c
40860 +++ b/fs/btrfs/ioctl.c
40861 @@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40862 for (i = 0; i < num_types; i++) {
40863 struct btrfs_space_info *tmp;
40864
40865 + /* Don't copy in more than we allocated */
40866 if (!slot_count)
40867 break;
40868
40869 + slot_count--;
40870 +
40871 info = NULL;
40872 rcu_read_lock();
40873 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40874 @@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40875 memcpy(dest, &space, sizeof(space));
40876 dest++;
40877 space_args.total_spaces++;
40878 - slot_count--;
40879 }
40880 - if (!slot_count)
40881 - break;
40882 }
40883 up_read(&info->groups_sem);
40884 }
40885
40886 - user_dest = (struct btrfs_ioctl_space_info *)
40887 + user_dest = (struct btrfs_ioctl_space_info __user *)
40888 (arg + sizeof(struct btrfs_ioctl_space_args));
40889
40890 if (copy_to_user(user_dest, dest_orig, alloc_size))
40891 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
40892 index cfb5543..1ae7347 100644
40893 --- a/fs/btrfs/relocation.c
40894 +++ b/fs/btrfs/relocation.c
40895 @@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
40896 }
40897 spin_unlock(&rc->reloc_root_tree.lock);
40898
40899 - BUG_ON((struct btrfs_root *)node->data != root);
40900 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
40901
40902 if (!del) {
40903 spin_lock(&rc->reloc_root_tree.lock);
40904 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
40905 index 622f469..e8d2d55 100644
40906 --- a/fs/cachefiles/bind.c
40907 +++ b/fs/cachefiles/bind.c
40908 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
40909 args);
40910
40911 /* start by checking things over */
40912 - ASSERT(cache->fstop_percent >= 0 &&
40913 - cache->fstop_percent < cache->fcull_percent &&
40914 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
40915 cache->fcull_percent < cache->frun_percent &&
40916 cache->frun_percent < 100);
40917
40918 - ASSERT(cache->bstop_percent >= 0 &&
40919 - cache->bstop_percent < cache->bcull_percent &&
40920 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
40921 cache->bcull_percent < cache->brun_percent &&
40922 cache->brun_percent < 100);
40923
40924 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
40925 index 0a1467b..6a53245 100644
40926 --- a/fs/cachefiles/daemon.c
40927 +++ b/fs/cachefiles/daemon.c
40928 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
40929 if (n > buflen)
40930 return -EMSGSIZE;
40931
40932 - if (copy_to_user(_buffer, buffer, n) != 0)
40933 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40934 return -EFAULT;
40935
40936 return n;
40937 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
40938 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40939 return -EIO;
40940
40941 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
40942 + if (datalen > PAGE_SIZE - 1)
40943 return -EOPNOTSUPP;
40944
40945 /* drag the command string into the kernel so we can parse it */
40946 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
40947 if (args[0] != '%' || args[1] != '\0')
40948 return -EINVAL;
40949
40950 - if (fstop < 0 || fstop >= cache->fcull_percent)
40951 + if (fstop >= cache->fcull_percent)
40952 return cachefiles_daemon_range_error(cache, args);
40953
40954 cache->fstop_percent = fstop;
40955 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
40956 if (args[0] != '%' || args[1] != '\0')
40957 return -EINVAL;
40958
40959 - if (bstop < 0 || bstop >= cache->bcull_percent)
40960 + if (bstop >= cache->bcull_percent)
40961 return cachefiles_daemon_range_error(cache, args);
40962
40963 cache->bstop_percent = bstop;
40964 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
40965 index bd6bc1b..b627b53 100644
40966 --- a/fs/cachefiles/internal.h
40967 +++ b/fs/cachefiles/internal.h
40968 @@ -57,7 +57,7 @@ struct cachefiles_cache {
40969 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40970 struct rb_root active_nodes; /* active nodes (can't be culled) */
40971 rwlock_t active_lock; /* lock for active_nodes */
40972 - atomic_t gravecounter; /* graveyard uniquifier */
40973 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40974 unsigned frun_percent; /* when to stop culling (% files) */
40975 unsigned fcull_percent; /* when to start culling (% files) */
40976 unsigned fstop_percent; /* when to stop allocating (% files) */
40977 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
40978 * proc.c
40979 */
40980 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40981 -extern atomic_t cachefiles_lookup_histogram[HZ];
40982 -extern atomic_t cachefiles_mkdir_histogram[HZ];
40983 -extern atomic_t cachefiles_create_histogram[HZ];
40984 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40985 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40986 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40987
40988 extern int __init cachefiles_proc_init(void);
40989 extern void cachefiles_proc_cleanup(void);
40990 static inline
40991 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40992 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40993 {
40994 unsigned long jif = jiffies - start_jif;
40995 if (jif >= HZ)
40996 jif = HZ - 1;
40997 - atomic_inc(&histogram[jif]);
40998 + atomic_inc_unchecked(&histogram[jif]);
40999 }
41000
41001 #else
41002 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
41003 index a0358c2..d6137f2 100644
41004 --- a/fs/cachefiles/namei.c
41005 +++ b/fs/cachefiles/namei.c
41006 @@ -318,7 +318,7 @@ try_again:
41007 /* first step is to make up a grave dentry in the graveyard */
41008 sprintf(nbuffer, "%08x%08x",
41009 (uint32_t) get_seconds(),
41010 - (uint32_t) atomic_inc_return(&cache->gravecounter));
41011 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
41012
41013 /* do the multiway lock magic */
41014 trap = lock_rename(cache->graveyard, dir);
41015 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
41016 index eccd339..4c1d995 100644
41017 --- a/fs/cachefiles/proc.c
41018 +++ b/fs/cachefiles/proc.c
41019 @@ -14,9 +14,9 @@
41020 #include <linux/seq_file.h>
41021 #include "internal.h"
41022
41023 -atomic_t cachefiles_lookup_histogram[HZ];
41024 -atomic_t cachefiles_mkdir_histogram[HZ];
41025 -atomic_t cachefiles_create_histogram[HZ];
41026 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
41027 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
41028 +atomic_unchecked_t cachefiles_create_histogram[HZ];
41029
41030 /*
41031 * display the latency histogram
41032 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
41033 return 0;
41034 default:
41035 index = (unsigned long) v - 3;
41036 - x = atomic_read(&cachefiles_lookup_histogram[index]);
41037 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
41038 - z = atomic_read(&cachefiles_create_histogram[index]);
41039 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
41040 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
41041 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
41042 if (x == 0 && y == 0 && z == 0)
41043 return 0;
41044
41045 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
41046 index 0e3c092..818480e 100644
41047 --- a/fs/cachefiles/rdwr.c
41048 +++ b/fs/cachefiles/rdwr.c
41049 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
41050 old_fs = get_fs();
41051 set_fs(KERNEL_DS);
41052 ret = file->f_op->write(
41053 - file, (const void __user *) data, len, &pos);
41054 + file, (const void __force_user *) data, len, &pos);
41055 set_fs(old_fs);
41056 kunmap(page);
41057 if (ret != len)
41058 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
41059 index 9895400..fa40a7d 100644
41060 --- a/fs/ceph/dir.c
41061 +++ b/fs/ceph/dir.c
41062 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
41063 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
41064 struct ceph_mds_client *mdsc = fsc->mdsc;
41065 unsigned frag = fpos_frag(filp->f_pos);
41066 - int off = fpos_off(filp->f_pos);
41067 + unsigned int off = fpos_off(filp->f_pos);
41068 int err;
41069 u32 ftype;
41070 struct ceph_mds_reply_info_parsed *rinfo;
41071 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
41072 index 84e8c07..6170d31 100644
41073 --- a/fs/cifs/cifs_debug.c
41074 +++ b/fs/cifs/cifs_debug.c
41075 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
41076
41077 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
41078 #ifdef CONFIG_CIFS_STATS2
41079 - atomic_set(&totBufAllocCount, 0);
41080 - atomic_set(&totSmBufAllocCount, 0);
41081 + atomic_set_unchecked(&totBufAllocCount, 0);
41082 + atomic_set_unchecked(&totSmBufAllocCount, 0);
41083 #endif /* CONFIG_CIFS_STATS2 */
41084 spin_lock(&cifs_tcp_ses_lock);
41085 list_for_each(tmp1, &cifs_tcp_ses_list) {
41086 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
41087 tcon = list_entry(tmp3,
41088 struct cifs_tcon,
41089 tcon_list);
41090 - atomic_set(&tcon->num_smbs_sent, 0);
41091 - atomic_set(&tcon->num_writes, 0);
41092 - atomic_set(&tcon->num_reads, 0);
41093 - atomic_set(&tcon->num_oplock_brks, 0);
41094 - atomic_set(&tcon->num_opens, 0);
41095 - atomic_set(&tcon->num_posixopens, 0);
41096 - atomic_set(&tcon->num_posixmkdirs, 0);
41097 - atomic_set(&tcon->num_closes, 0);
41098 - atomic_set(&tcon->num_deletes, 0);
41099 - atomic_set(&tcon->num_mkdirs, 0);
41100 - atomic_set(&tcon->num_rmdirs, 0);
41101 - atomic_set(&tcon->num_renames, 0);
41102 - atomic_set(&tcon->num_t2renames, 0);
41103 - atomic_set(&tcon->num_ffirst, 0);
41104 - atomic_set(&tcon->num_fnext, 0);
41105 - atomic_set(&tcon->num_fclose, 0);
41106 - atomic_set(&tcon->num_hardlinks, 0);
41107 - atomic_set(&tcon->num_symlinks, 0);
41108 - atomic_set(&tcon->num_locks, 0);
41109 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
41110 + atomic_set_unchecked(&tcon->num_writes, 0);
41111 + atomic_set_unchecked(&tcon->num_reads, 0);
41112 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
41113 + atomic_set_unchecked(&tcon->num_opens, 0);
41114 + atomic_set_unchecked(&tcon->num_posixopens, 0);
41115 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
41116 + atomic_set_unchecked(&tcon->num_closes, 0);
41117 + atomic_set_unchecked(&tcon->num_deletes, 0);
41118 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
41119 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
41120 + atomic_set_unchecked(&tcon->num_renames, 0);
41121 + atomic_set_unchecked(&tcon->num_t2renames, 0);
41122 + atomic_set_unchecked(&tcon->num_ffirst, 0);
41123 + atomic_set_unchecked(&tcon->num_fnext, 0);
41124 + atomic_set_unchecked(&tcon->num_fclose, 0);
41125 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
41126 + atomic_set_unchecked(&tcon->num_symlinks, 0);
41127 + atomic_set_unchecked(&tcon->num_locks, 0);
41128 }
41129 }
41130 }
41131 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
41132 smBufAllocCount.counter, cifs_min_small);
41133 #ifdef CONFIG_CIFS_STATS2
41134 seq_printf(m, "Total Large %d Small %d Allocations\n",
41135 - atomic_read(&totBufAllocCount),
41136 - atomic_read(&totSmBufAllocCount));
41137 + atomic_read_unchecked(&totBufAllocCount),
41138 + atomic_read_unchecked(&totSmBufAllocCount));
41139 #endif /* CONFIG_CIFS_STATS2 */
41140
41141 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
41142 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
41143 if (tcon->need_reconnect)
41144 seq_puts(m, "\tDISCONNECTED ");
41145 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
41146 - atomic_read(&tcon->num_smbs_sent),
41147 - atomic_read(&tcon->num_oplock_brks));
41148 + atomic_read_unchecked(&tcon->num_smbs_sent),
41149 + atomic_read_unchecked(&tcon->num_oplock_brks));
41150 seq_printf(m, "\nReads: %d Bytes: %lld",
41151 - atomic_read(&tcon->num_reads),
41152 + atomic_read_unchecked(&tcon->num_reads),
41153 (long long)(tcon->bytes_read));
41154 seq_printf(m, "\nWrites: %d Bytes: %lld",
41155 - atomic_read(&tcon->num_writes),
41156 + atomic_read_unchecked(&tcon->num_writes),
41157 (long long)(tcon->bytes_written));
41158 seq_printf(m, "\nFlushes: %d",
41159 - atomic_read(&tcon->num_flushes));
41160 + atomic_read_unchecked(&tcon->num_flushes));
41161 seq_printf(m, "\nLocks: %d HardLinks: %d "
41162 "Symlinks: %d",
41163 - atomic_read(&tcon->num_locks),
41164 - atomic_read(&tcon->num_hardlinks),
41165 - atomic_read(&tcon->num_symlinks));
41166 + atomic_read_unchecked(&tcon->num_locks),
41167 + atomic_read_unchecked(&tcon->num_hardlinks),
41168 + atomic_read_unchecked(&tcon->num_symlinks));
41169 seq_printf(m, "\nOpens: %d Closes: %d "
41170 "Deletes: %d",
41171 - atomic_read(&tcon->num_opens),
41172 - atomic_read(&tcon->num_closes),
41173 - atomic_read(&tcon->num_deletes));
41174 + atomic_read_unchecked(&tcon->num_opens),
41175 + atomic_read_unchecked(&tcon->num_closes),
41176 + atomic_read_unchecked(&tcon->num_deletes));
41177 seq_printf(m, "\nPosix Opens: %d "
41178 "Posix Mkdirs: %d",
41179 - atomic_read(&tcon->num_posixopens),
41180 - atomic_read(&tcon->num_posixmkdirs));
41181 + atomic_read_unchecked(&tcon->num_posixopens),
41182 + atomic_read_unchecked(&tcon->num_posixmkdirs));
41183 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
41184 - atomic_read(&tcon->num_mkdirs),
41185 - atomic_read(&tcon->num_rmdirs));
41186 + atomic_read_unchecked(&tcon->num_mkdirs),
41187 + atomic_read_unchecked(&tcon->num_rmdirs));
41188 seq_printf(m, "\nRenames: %d T2 Renames %d",
41189 - atomic_read(&tcon->num_renames),
41190 - atomic_read(&tcon->num_t2renames));
41191 + atomic_read_unchecked(&tcon->num_renames),
41192 + atomic_read_unchecked(&tcon->num_t2renames));
41193 seq_printf(m, "\nFindFirst: %d FNext %d "
41194 "FClose %d",
41195 - atomic_read(&tcon->num_ffirst),
41196 - atomic_read(&tcon->num_fnext),
41197 - atomic_read(&tcon->num_fclose));
41198 + atomic_read_unchecked(&tcon->num_ffirst),
41199 + atomic_read_unchecked(&tcon->num_fnext),
41200 + atomic_read_unchecked(&tcon->num_fclose));
41201 }
41202 }
41203 }
41204 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
41205 index 8f1fe32..38f9e27 100644
41206 --- a/fs/cifs/cifsfs.c
41207 +++ b/fs/cifs/cifsfs.c
41208 @@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
41209 cifs_req_cachep = kmem_cache_create("cifs_request",
41210 CIFSMaxBufSize +
41211 MAX_CIFS_HDR_SIZE, 0,
41212 - SLAB_HWCACHE_ALIGN, NULL);
41213 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
41214 if (cifs_req_cachep == NULL)
41215 return -ENOMEM;
41216
41217 @@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
41218 efficient to alloc 1 per page off the slab compared to 17K (5page)
41219 alloc of large cifs buffers even when page debugging is on */
41220 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
41221 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
41222 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
41223 NULL);
41224 if (cifs_sm_req_cachep == NULL) {
41225 mempool_destroy(cifs_req_poolp);
41226 @@ -1101,8 +1101,8 @@ init_cifs(void)
41227 atomic_set(&bufAllocCount, 0);
41228 atomic_set(&smBufAllocCount, 0);
41229 #ifdef CONFIG_CIFS_STATS2
41230 - atomic_set(&totBufAllocCount, 0);
41231 - atomic_set(&totSmBufAllocCount, 0);
41232 + atomic_set_unchecked(&totBufAllocCount, 0);
41233 + atomic_set_unchecked(&totSmBufAllocCount, 0);
41234 #endif /* CONFIG_CIFS_STATS2 */
41235
41236 atomic_set(&midCount, 0);
41237 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
41238 index 8238aa1..0347196 100644
41239 --- a/fs/cifs/cifsglob.h
41240 +++ b/fs/cifs/cifsglob.h
41241 @@ -392,28 +392,28 @@ struct cifs_tcon {
41242 __u16 Flags; /* optional support bits */
41243 enum statusEnum tidStatus;
41244 #ifdef CONFIG_CIFS_STATS
41245 - atomic_t num_smbs_sent;
41246 - atomic_t num_writes;
41247 - atomic_t num_reads;
41248 - atomic_t num_flushes;
41249 - atomic_t num_oplock_brks;
41250 - atomic_t num_opens;
41251 - atomic_t num_closes;
41252 - atomic_t num_deletes;
41253 - atomic_t num_mkdirs;
41254 - atomic_t num_posixopens;
41255 - atomic_t num_posixmkdirs;
41256 - atomic_t num_rmdirs;
41257 - atomic_t num_renames;
41258 - atomic_t num_t2renames;
41259 - atomic_t num_ffirst;
41260 - atomic_t num_fnext;
41261 - atomic_t num_fclose;
41262 - atomic_t num_hardlinks;
41263 - atomic_t num_symlinks;
41264 - atomic_t num_locks;
41265 - atomic_t num_acl_get;
41266 - atomic_t num_acl_set;
41267 + atomic_unchecked_t num_smbs_sent;
41268 + atomic_unchecked_t num_writes;
41269 + atomic_unchecked_t num_reads;
41270 + atomic_unchecked_t num_flushes;
41271 + atomic_unchecked_t num_oplock_brks;
41272 + atomic_unchecked_t num_opens;
41273 + atomic_unchecked_t num_closes;
41274 + atomic_unchecked_t num_deletes;
41275 + atomic_unchecked_t num_mkdirs;
41276 + atomic_unchecked_t num_posixopens;
41277 + atomic_unchecked_t num_posixmkdirs;
41278 + atomic_unchecked_t num_rmdirs;
41279 + atomic_unchecked_t num_renames;
41280 + atomic_unchecked_t num_t2renames;
41281 + atomic_unchecked_t num_ffirst;
41282 + atomic_unchecked_t num_fnext;
41283 + atomic_unchecked_t num_fclose;
41284 + atomic_unchecked_t num_hardlinks;
41285 + atomic_unchecked_t num_symlinks;
41286 + atomic_unchecked_t num_locks;
41287 + atomic_unchecked_t num_acl_get;
41288 + atomic_unchecked_t num_acl_set;
41289 #ifdef CONFIG_CIFS_STATS2
41290 unsigned long long time_writes;
41291 unsigned long long time_reads;
41292 @@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
41293 }
41294
41295 #ifdef CONFIG_CIFS_STATS
41296 -#define cifs_stats_inc atomic_inc
41297 +#define cifs_stats_inc atomic_inc_unchecked
41298
41299 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
41300 unsigned int bytes)
41301 @@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
41302 /* Various Debug counters */
41303 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
41304 #ifdef CONFIG_CIFS_STATS2
41305 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
41306 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
41307 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
41308 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
41309 #endif
41310 GLOBAL_EXTERN atomic_t smBufAllocCount;
41311 GLOBAL_EXTERN atomic_t midCount;
41312 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
41313 index 6b0e064..94e6c3c 100644
41314 --- a/fs/cifs/link.c
41315 +++ b/fs/cifs/link.c
41316 @@ -600,7 +600,7 @@ symlink_exit:
41317
41318 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41319 {
41320 - char *p = nd_get_link(nd);
41321 + const char *p = nd_get_link(nd);
41322 if (!IS_ERR(p))
41323 kfree(p);
41324 }
41325 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
41326 index 703ef5c..2a44ed5 100644
41327 --- a/fs/cifs/misc.c
41328 +++ b/fs/cifs/misc.c
41329 @@ -156,7 +156,7 @@ cifs_buf_get(void)
41330 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41331 atomic_inc(&bufAllocCount);
41332 #ifdef CONFIG_CIFS_STATS2
41333 - atomic_inc(&totBufAllocCount);
41334 + atomic_inc_unchecked(&totBufAllocCount);
41335 #endif /* CONFIG_CIFS_STATS2 */
41336 }
41337
41338 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41339 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41340 atomic_inc(&smBufAllocCount);
41341 #ifdef CONFIG_CIFS_STATS2
41342 - atomic_inc(&totSmBufAllocCount);
41343 + atomic_inc_unchecked(&totSmBufAllocCount);
41344 #endif /* CONFIG_CIFS_STATS2 */
41345
41346 }
41347 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
41348 index 6901578..d402eb5 100644
41349 --- a/fs/coda/cache.c
41350 +++ b/fs/coda/cache.c
41351 @@ -24,7 +24,7 @@
41352 #include "coda_linux.h"
41353 #include "coda_cache.h"
41354
41355 -static atomic_t permission_epoch = ATOMIC_INIT(0);
41356 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41357
41358 /* replace or extend an acl cache hit */
41359 void coda_cache_enter(struct inode *inode, int mask)
41360 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
41361 struct coda_inode_info *cii = ITOC(inode);
41362
41363 spin_lock(&cii->c_lock);
41364 - cii->c_cached_epoch = atomic_read(&permission_epoch);
41365 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41366 if (cii->c_uid != current_fsuid()) {
41367 cii->c_uid = current_fsuid();
41368 cii->c_cached_perm = mask;
41369 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
41370 {
41371 struct coda_inode_info *cii = ITOC(inode);
41372 spin_lock(&cii->c_lock);
41373 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41374 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41375 spin_unlock(&cii->c_lock);
41376 }
41377
41378 /* remove all acl caches */
41379 void coda_cache_clear_all(struct super_block *sb)
41380 {
41381 - atomic_inc(&permission_epoch);
41382 + atomic_inc_unchecked(&permission_epoch);
41383 }
41384
41385
41386 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
41387 spin_lock(&cii->c_lock);
41388 hit = (mask & cii->c_cached_perm) == mask &&
41389 cii->c_uid == current_fsuid() &&
41390 - cii->c_cached_epoch == atomic_read(&permission_epoch);
41391 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41392 spin_unlock(&cii->c_lock);
41393
41394 return hit;
41395 diff --git a/fs/compat.c b/fs/compat.c
41396 index c987875..08771ca 100644
41397 --- a/fs/compat.c
41398 +++ b/fs/compat.c
41399 @@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
41400 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41401 {
41402 compat_ino_t ino = stat->ino;
41403 - typeof(ubuf->st_uid) uid = 0;
41404 - typeof(ubuf->st_gid) gid = 0;
41405 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41406 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41407 int err;
41408
41409 SET_UID(uid, stat->uid);
41410 @@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
41411
41412 set_fs(KERNEL_DS);
41413 /* The __user pointer cast is valid because of the set_fs() */
41414 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41415 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41416 set_fs(oldfs);
41417 /* truncating is ok because it's a user address */
41418 if (!ret)
41419 @@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
41420 goto out;
41421
41422 ret = -EINVAL;
41423 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41424 + if (nr_segs > UIO_MAXIOV)
41425 goto out;
41426 if (nr_segs > fast_segs) {
41427 ret = -ENOMEM;
41428 @@ -845,6 +845,7 @@ struct compat_old_linux_dirent {
41429
41430 struct compat_readdir_callback {
41431 struct compat_old_linux_dirent __user *dirent;
41432 + struct file * file;
41433 int result;
41434 };
41435
41436 @@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
41437 buf->result = -EOVERFLOW;
41438 return -EOVERFLOW;
41439 }
41440 +
41441 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41442 + return 0;
41443 +
41444 buf->result++;
41445 dirent = buf->dirent;
41446 if (!access_ok(VERIFY_WRITE, dirent,
41447 @@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
41448
41449 buf.result = 0;
41450 buf.dirent = dirent;
41451 + buf.file = file;
41452
41453 error = vfs_readdir(file, compat_fillonedir, &buf);
41454 if (buf.result)
41455 @@ -914,6 +920,7 @@ struct compat_linux_dirent {
41456 struct compat_getdents_callback {
41457 struct compat_linux_dirent __user *current_dir;
41458 struct compat_linux_dirent __user *previous;
41459 + struct file * file;
41460 int count;
41461 int error;
41462 };
41463 @@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
41464 buf->error = -EOVERFLOW;
41465 return -EOVERFLOW;
41466 }
41467 +
41468 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41469 + return 0;
41470 +
41471 dirent = buf->previous;
41472 if (dirent) {
41473 if (__put_user(offset, &dirent->d_off))
41474 @@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
41475 buf.previous = NULL;
41476 buf.count = count;
41477 buf.error = 0;
41478 + buf.file = file;
41479
41480 error = vfs_readdir(file, compat_filldir, &buf);
41481 if (error >= 0)
41482 @@ -1003,6 +1015,7 @@ out:
41483 struct compat_getdents_callback64 {
41484 struct linux_dirent64 __user *current_dir;
41485 struct linux_dirent64 __user *previous;
41486 + struct file * file;
41487 int count;
41488 int error;
41489 };
41490 @@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
41491 buf->error = -EINVAL; /* only used if we fail.. */
41492 if (reclen > buf->count)
41493 return -EINVAL;
41494 +
41495 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41496 + return 0;
41497 +
41498 dirent = buf->previous;
41499
41500 if (dirent) {
41501 @@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
41502 buf.previous = NULL;
41503 buf.count = count;
41504 buf.error = 0;
41505 + buf.file = file;
41506
41507 error = vfs_readdir(file, compat_filldir64, &buf);
41508 if (error >= 0)
41509 error = buf.error;
41510 lastdirent = buf.previous;
41511 if (lastdirent) {
41512 - typeof(lastdirent->d_off) d_off = file->f_pos;
41513 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41514 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41515 error = -EFAULT;
41516 else
41517 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
41518 index 112e45a..b59845b 100644
41519 --- a/fs/compat_binfmt_elf.c
41520 +++ b/fs/compat_binfmt_elf.c
41521 @@ -30,11 +30,13 @@
41522 #undef elf_phdr
41523 #undef elf_shdr
41524 #undef elf_note
41525 +#undef elf_dyn
41526 #undef elf_addr_t
41527 #define elfhdr elf32_hdr
41528 #define elf_phdr elf32_phdr
41529 #define elf_shdr elf32_shdr
41530 #define elf_note elf32_note
41531 +#define elf_dyn Elf32_Dyn
41532 #define elf_addr_t Elf32_Addr
41533
41534 /*
41535 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
41536 index 51352de..93292ff 100644
41537 --- a/fs/compat_ioctl.c
41538 +++ b/fs/compat_ioctl.c
41539 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
41540
41541 err = get_user(palp, &up->palette);
41542 err |= get_user(length, &up->length);
41543 + if (err)
41544 + return -EFAULT;
41545
41546 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41547 err = put_user(compat_ptr(palp), &up_native->palette);
41548 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
41549 return -EFAULT;
41550 if (__get_user(udata, &ss32->iomem_base))
41551 return -EFAULT;
41552 - ss.iomem_base = compat_ptr(udata);
41553 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41554 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41555 __get_user(ss.port_high, &ss32->port_high))
41556 return -EFAULT;
41557 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
41558 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41559 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41560 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41561 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41562 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41563 return -EFAULT;
41564
41565 return ioctl_preallocate(file, p);
41566 @@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
41567 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41568 {
41569 unsigned int a, b;
41570 - a = *(unsigned int *)p;
41571 - b = *(unsigned int *)q;
41572 + a = *(const unsigned int *)p;
41573 + b = *(const unsigned int *)q;
41574 if (a > b)
41575 return 1;
41576 if (a < b)
41577 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
41578 index 9a37a9b..35792b6 100644
41579 --- a/fs/configfs/dir.c
41580 +++ b/fs/configfs/dir.c
41581 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41582 }
41583 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41584 struct configfs_dirent *next;
41585 - const char * name;
41586 + const unsigned char * name;
41587 + char d_name[sizeof(next->s_dentry->d_iname)];
41588 int len;
41589 struct inode *inode = NULL;
41590
41591 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41592 continue;
41593
41594 name = configfs_get_name(next);
41595 - len = strlen(name);
41596 + if (next->s_dentry && name == next->s_dentry->d_iname) {
41597 + len = next->s_dentry->d_name.len;
41598 + memcpy(d_name, name, len);
41599 + name = d_name;
41600 + } else
41601 + len = strlen(name);
41602
41603 /*
41604 * We'll have a dentry and an inode for
41605 diff --git a/fs/dcache.c b/fs/dcache.c
41606 index f7908ae..920a680 100644
41607 --- a/fs/dcache.c
41608 +++ b/fs/dcache.c
41609 @@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages)
41610 mempages -= reserve;
41611
41612 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41613 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41614 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41615
41616 dcache_init();
41617 inode_init();
41618 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
41619 index f3a257d..715ac0f 100644
41620 --- a/fs/debugfs/inode.c
41621 +++ b/fs/debugfs/inode.c
41622 @@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
41623 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
41624 {
41625 return debugfs_create_file(name,
41626 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41627 + S_IFDIR | S_IRWXU,
41628 +#else
41629 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41630 +#endif
41631 parent, NULL, NULL);
41632 }
41633 EXPORT_SYMBOL_GPL(debugfs_create_dir);
41634 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
41635 index d2039ca..a766407 100644
41636 --- a/fs/ecryptfs/inode.c
41637 +++ b/fs/ecryptfs/inode.c
41638 @@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
41639 old_fs = get_fs();
41640 set_fs(get_ds());
41641 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41642 - (char __user *)lower_buf,
41643 + (char __force_user *)lower_buf,
41644 lower_bufsiz);
41645 set_fs(old_fs);
41646 if (rc < 0)
41647 @@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41648 }
41649 old_fs = get_fs();
41650 set_fs(get_ds());
41651 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41652 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41653 set_fs(old_fs);
41654 if (rc < 0) {
41655 kfree(buf);
41656 @@ -752,7 +752,7 @@ out:
41657 static void
41658 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41659 {
41660 - char *buf = nd_get_link(nd);
41661 + const char *buf = nd_get_link(nd);
41662 if (!IS_ERR(buf)) {
41663 /* Free the char* */
41664 kfree(buf);
41665 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
41666 index 0dc5a3d..d3cdeea 100644
41667 --- a/fs/ecryptfs/miscdev.c
41668 +++ b/fs/ecryptfs/miscdev.c
41669 @@ -328,7 +328,7 @@ check_list:
41670 goto out_unlock_msg_ctx;
41671 i = 5;
41672 if (msg_ctx->msg) {
41673 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
41674 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41675 goto out_unlock_msg_ctx;
41676 i += packet_length_size;
41677 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41678 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
41679 index 608c1c3..7d040a8 100644
41680 --- a/fs/ecryptfs/read_write.c
41681 +++ b/fs/ecryptfs/read_write.c
41682 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
41683 return -EIO;
41684 fs_save = get_fs();
41685 set_fs(get_ds());
41686 - rc = vfs_write(lower_file, data, size, &offset);
41687 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41688 set_fs(fs_save);
41689 mark_inode_dirty_sync(ecryptfs_inode);
41690 return rc;
41691 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
41692 return -EIO;
41693 fs_save = get_fs();
41694 set_fs(get_ds());
41695 - rc = vfs_read(lower_file, data, size, &offset);
41696 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41697 set_fs(fs_save);
41698 return rc;
41699 }
41700 diff --git a/fs/exec.c b/fs/exec.c
41701 index 3625464..04855f9 100644
41702 --- a/fs/exec.c
41703 +++ b/fs/exec.c
41704 @@ -55,12 +55,28 @@
41705 #include <linux/pipe_fs_i.h>
41706 #include <linux/oom.h>
41707 #include <linux/compat.h>
41708 +#include <linux/random.h>
41709 +#include <linux/seq_file.h>
41710 +
41711 +#ifdef CONFIG_PAX_REFCOUNT
41712 +#include <linux/kallsyms.h>
41713 +#include <linux/kdebug.h>
41714 +#endif
41715
41716 #include <asm/uaccess.h>
41717 #include <asm/mmu_context.h>
41718 #include <asm/tlb.h>
41719 #include "internal.h"
41720
41721 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
41722 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
41723 +#endif
41724 +
41725 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41726 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41727 +EXPORT_SYMBOL(pax_set_initial_flags_func);
41728 +#endif
41729 +
41730 int core_uses_pid;
41731 char core_pattern[CORENAME_MAX_SIZE] = "core";
41732 unsigned int core_pipe_limit;
41733 @@ -70,7 +86,7 @@ struct core_name {
41734 char *corename;
41735 int used, size;
41736 };
41737 -static atomic_t call_count = ATOMIC_INIT(1);
41738 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41739
41740 /* The maximal length of core_pattern is also specified in sysctl.c */
41741
41742 @@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41743 int write)
41744 {
41745 struct page *page;
41746 - int ret;
41747
41748 -#ifdef CONFIG_STACK_GROWSUP
41749 - if (write) {
41750 - ret = expand_downwards(bprm->vma, pos);
41751 - if (ret < 0)
41752 - return NULL;
41753 - }
41754 -#endif
41755 - ret = get_user_pages(current, bprm->mm, pos,
41756 - 1, write, 1, &page, NULL);
41757 - if (ret <= 0)
41758 + if (0 > expand_downwards(bprm->vma, pos))
41759 + return NULL;
41760 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41761 return NULL;
41762
41763 if (write) {
41764 @@ -215,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41765 if (size <= ARG_MAX)
41766 return page;
41767
41768 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41769 + // only allow 1MB for argv+env on suid/sgid binaries
41770 + // to prevent easy ASLR exhaustion
41771 + if (((bprm->cred->euid != current_euid()) ||
41772 + (bprm->cred->egid != current_egid())) &&
41773 + (size > (1024 * 1024))) {
41774 + put_page(page);
41775 + return NULL;
41776 + }
41777 +#endif
41778 +
41779 /*
41780 * Limit to 1/4-th the stack size for the argv+env strings.
41781 * This ensures that:
41782 @@ -274,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41783 vma->vm_end = STACK_TOP_MAX;
41784 vma->vm_start = vma->vm_end - PAGE_SIZE;
41785 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41786 +
41787 +#ifdef CONFIG_PAX_SEGMEXEC
41788 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41789 +#endif
41790 +
41791 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41792 INIT_LIST_HEAD(&vma->anon_vma_chain);
41793
41794 @@ -288,6 +312,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41795 mm->stack_vm = mm->total_vm = 1;
41796 up_write(&mm->mmap_sem);
41797 bprm->p = vma->vm_end - sizeof(void *);
41798 +
41799 +#ifdef CONFIG_PAX_RANDUSTACK
41800 + if (randomize_va_space)
41801 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41802 +#endif
41803 +
41804 return 0;
41805 err:
41806 up_write(&mm->mmap_sem);
41807 @@ -396,19 +426,7 @@ err:
41808 return err;
41809 }
41810
41811 -struct user_arg_ptr {
41812 -#ifdef CONFIG_COMPAT
41813 - bool is_compat;
41814 -#endif
41815 - union {
41816 - const char __user *const __user *native;
41817 -#ifdef CONFIG_COMPAT
41818 - compat_uptr_t __user *compat;
41819 -#endif
41820 - } ptr;
41821 -};
41822 -
41823 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41824 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41825 {
41826 const char __user *native;
41827
41828 @@ -417,14 +435,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41829 compat_uptr_t compat;
41830
41831 if (get_user(compat, argv.ptr.compat + nr))
41832 - return ERR_PTR(-EFAULT);
41833 + return (const char __force_user *)ERR_PTR(-EFAULT);
41834
41835 return compat_ptr(compat);
41836 }
41837 #endif
41838
41839 if (get_user(native, argv.ptr.native + nr))
41840 - return ERR_PTR(-EFAULT);
41841 + return (const char __force_user *)ERR_PTR(-EFAULT);
41842
41843 return native;
41844 }
41845 @@ -443,7 +461,7 @@ static int count(struct user_arg_ptr argv, int max)
41846 if (!p)
41847 break;
41848
41849 - if (IS_ERR(p))
41850 + if (IS_ERR((const char __force_kernel *)p))
41851 return -EFAULT;
41852
41853 if (i++ >= max)
41854 @@ -477,7 +495,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
41855
41856 ret = -EFAULT;
41857 str = get_user_arg_ptr(argv, argc);
41858 - if (IS_ERR(str))
41859 + if (IS_ERR((const char __force_kernel *)str))
41860 goto out;
41861
41862 len = strnlen_user(str, MAX_ARG_STRLEN);
41863 @@ -559,7 +577,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
41864 int r;
41865 mm_segment_t oldfs = get_fs();
41866 struct user_arg_ptr argv = {
41867 - .ptr.native = (const char __user *const __user *)__argv,
41868 + .ptr.native = (const char __force_user *const __force_user *)__argv,
41869 };
41870
41871 set_fs(KERNEL_DS);
41872 @@ -594,7 +612,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41873 unsigned long new_end = old_end - shift;
41874 struct mmu_gather tlb;
41875
41876 - BUG_ON(new_start > new_end);
41877 + if (new_start >= new_end || new_start < mmap_min_addr)
41878 + return -ENOMEM;
41879
41880 /*
41881 * ensure there are no vmas between where we want to go
41882 @@ -603,6 +622,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41883 if (vma != find_vma(mm, new_start))
41884 return -EFAULT;
41885
41886 +#ifdef CONFIG_PAX_SEGMEXEC
41887 + BUG_ON(pax_find_mirror_vma(vma));
41888 +#endif
41889 +
41890 /*
41891 * cover the whole range: [new_start, old_end)
41892 */
41893 @@ -683,10 +706,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41894 stack_top = arch_align_stack(stack_top);
41895 stack_top = PAGE_ALIGN(stack_top);
41896
41897 - if (unlikely(stack_top < mmap_min_addr) ||
41898 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41899 - return -ENOMEM;
41900 -
41901 stack_shift = vma->vm_end - stack_top;
41902
41903 bprm->p -= stack_shift;
41904 @@ -698,8 +717,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
41905 bprm->exec -= stack_shift;
41906
41907 down_write(&mm->mmap_sem);
41908 +
41909 + /* Move stack pages down in memory. */
41910 + if (stack_shift) {
41911 + ret = shift_arg_pages(vma, stack_shift);
41912 + if (ret)
41913 + goto out_unlock;
41914 + }
41915 +
41916 vm_flags = VM_STACK_FLAGS;
41917
41918 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41919 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41920 + vm_flags &= ~VM_EXEC;
41921 +
41922 +#ifdef CONFIG_PAX_MPROTECT
41923 + if (mm->pax_flags & MF_PAX_MPROTECT)
41924 + vm_flags &= ~VM_MAYEXEC;
41925 +#endif
41926 +
41927 + }
41928 +#endif
41929 +
41930 /*
41931 * Adjust stack execute permissions; explicitly enable for
41932 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41933 @@ -718,13 +757,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41934 goto out_unlock;
41935 BUG_ON(prev != vma);
41936
41937 - /* Move stack pages down in memory. */
41938 - if (stack_shift) {
41939 - ret = shift_arg_pages(vma, stack_shift);
41940 - if (ret)
41941 - goto out_unlock;
41942 - }
41943 -
41944 /* mprotect_fixup is overkill to remove the temporary stack flags */
41945 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41946
41947 @@ -805,7 +837,7 @@ int kernel_read(struct file *file, loff_t offset,
41948 old_fs = get_fs();
41949 set_fs(get_ds());
41950 /* The cast to a user pointer is valid due to the set_fs() */
41951 - result = vfs_read(file, (void __user *)addr, count, &pos);
41952 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
41953 set_fs(old_fs);
41954 return result;
41955 }
41956 @@ -1067,6 +1099,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
41957 perf_event_comm(tsk);
41958 }
41959
41960 +static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
41961 +{
41962 + int i, ch;
41963 +
41964 + /* Copies the binary name from after last slash */
41965 + for (i = 0; (ch = *(fn++)) != '\0';) {
41966 + if (ch == '/')
41967 + i = 0; /* overwrite what we wrote */
41968 + else
41969 + if (i < len - 1)
41970 + tcomm[i++] = ch;
41971 + }
41972 + tcomm[i] = '\0';
41973 +}
41974 +
41975 int flush_old_exec(struct linux_binprm * bprm)
41976 {
41977 int retval;
41978 @@ -1081,6 +1128,7 @@ int flush_old_exec(struct linux_binprm * bprm)
41979
41980 set_mm_exe_file(bprm->mm, bprm->file);
41981
41982 + filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
41983 /*
41984 * Release all of the old mmap stuff
41985 */
41986 @@ -1112,10 +1160,6 @@ EXPORT_SYMBOL(would_dump);
41987
41988 void setup_new_exec(struct linux_binprm * bprm)
41989 {
41990 - int i, ch;
41991 - const char *name;
41992 - char tcomm[sizeof(current->comm)];
41993 -
41994 arch_pick_mmap_layout(current->mm);
41995
41996 /* This is the point of no return */
41997 @@ -1126,18 +1170,7 @@ void setup_new_exec(struct linux_binprm * bprm)
41998 else
41999 set_dumpable(current->mm, suid_dumpable);
42000
42001 - name = bprm->filename;
42002 -
42003 - /* Copies the binary name from after last slash */
42004 - for (i=0; (ch = *(name++)) != '\0';) {
42005 - if (ch == '/')
42006 - i = 0; /* overwrite what we wrote */
42007 - else
42008 - if (i < (sizeof(tcomm) - 1))
42009 - tcomm[i++] = ch;
42010 - }
42011 - tcomm[i] = '\0';
42012 - set_task_comm(current, tcomm);
42013 + set_task_comm(current, bprm->tcomm);
42014
42015 /* Set the new mm task size. We have to do that late because it may
42016 * depend on TIF_32BIT which is only updated in flush_thread() on
42017 @@ -1247,7 +1280,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
42018 }
42019 rcu_read_unlock();
42020
42021 - if (p->fs->users > n_fs) {
42022 + if (atomic_read(&p->fs->users) > n_fs) {
42023 bprm->unsafe |= LSM_UNSAFE_SHARE;
42024 } else {
42025 res = -EAGAIN;
42026 @@ -1442,6 +1475,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
42027
42028 EXPORT_SYMBOL(search_binary_handler);
42029
42030 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42031 +static atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
42032 +#endif
42033 +
42034 /*
42035 * sys_execve() executes a new program.
42036 */
42037 @@ -1450,6 +1487,11 @@ static int do_execve_common(const char *filename,
42038 struct user_arg_ptr envp,
42039 struct pt_regs *regs)
42040 {
42041 +#ifdef CONFIG_GRKERNSEC
42042 + struct file *old_exec_file;
42043 + struct acl_subject_label *old_acl;
42044 + struct rlimit old_rlim[RLIM_NLIMITS];
42045 +#endif
42046 struct linux_binprm *bprm;
42047 struct file *file;
42048 struct files_struct *displaced;
42049 @@ -1457,6 +1499,8 @@ static int do_execve_common(const char *filename,
42050 int retval;
42051 const struct cred *cred = current_cred();
42052
42053 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
42054 +
42055 /*
42056 * We move the actual failure in case of RLIMIT_NPROC excess from
42057 * set*uid() to execve() because too many poorly written programs
42058 @@ -1497,12 +1541,27 @@ static int do_execve_common(const char *filename,
42059 if (IS_ERR(file))
42060 goto out_unmark;
42061
42062 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
42063 + retval = -EPERM;
42064 + goto out_file;
42065 + }
42066 +
42067 sched_exec();
42068
42069 bprm->file = file;
42070 bprm->filename = filename;
42071 bprm->interp = filename;
42072
42073 + if (gr_process_user_ban()) {
42074 + retval = -EPERM;
42075 + goto out_file;
42076 + }
42077 +
42078 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
42079 + retval = -EACCES;
42080 + goto out_file;
42081 + }
42082 +
42083 retval = bprm_mm_init(bprm);
42084 if (retval)
42085 goto out_file;
42086 @@ -1532,11 +1591,46 @@ static int do_execve_common(const char *filename,
42087 if (retval < 0)
42088 goto out;
42089
42090 + if (!gr_tpe_allow(file)) {
42091 + retval = -EACCES;
42092 + goto out;
42093 + }
42094 +
42095 + if (gr_check_crash_exec(file)) {
42096 + retval = -EACCES;
42097 + goto out;
42098 + }
42099 +
42100 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
42101 +
42102 + gr_handle_exec_args(bprm, argv);
42103 +
42104 +#ifdef CONFIG_GRKERNSEC
42105 + old_acl = current->acl;
42106 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
42107 + old_exec_file = current->exec_file;
42108 + get_file(file);
42109 + current->exec_file = file;
42110 +#endif
42111 +
42112 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
42113 + bprm->unsafe);
42114 + if (retval < 0)
42115 + goto out_fail;
42116 +
42117 retval = search_binary_handler(bprm,regs);
42118 if (retval < 0)
42119 - goto out;
42120 + goto out_fail;
42121 +#ifdef CONFIG_GRKERNSEC
42122 + if (old_exec_file)
42123 + fput(old_exec_file);
42124 +#endif
42125
42126 /* execve succeeded */
42127 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42128 + current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
42129 +#endif
42130 +
42131 current->fs->in_exec = 0;
42132 current->in_execve = 0;
42133 acct_update_integrals(current);
42134 @@ -1545,6 +1639,14 @@ static int do_execve_common(const char *filename,
42135 put_files_struct(displaced);
42136 return retval;
42137
42138 +out_fail:
42139 +#ifdef CONFIG_GRKERNSEC
42140 + current->acl = old_acl;
42141 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
42142 + fput(current->exec_file);
42143 + current->exec_file = old_exec_file;
42144 +#endif
42145 +
42146 out:
42147 if (bprm->mm) {
42148 acct_arg_size(bprm, 0);
42149 @@ -1618,7 +1720,7 @@ static int expand_corename(struct core_name *cn)
42150 {
42151 char *old_corename = cn->corename;
42152
42153 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
42154 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
42155 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
42156
42157 if (!cn->corename) {
42158 @@ -1715,7 +1817,7 @@ static int format_corename(struct core_name *cn, long signr)
42159 int pid_in_pattern = 0;
42160 int err = 0;
42161
42162 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
42163 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
42164 cn->corename = kmalloc(cn->size, GFP_KERNEL);
42165 cn->used = 0;
42166
42167 @@ -1812,6 +1914,218 @@ out:
42168 return ispipe;
42169 }
42170
42171 +int pax_check_flags(unsigned long *flags)
42172 +{
42173 + int retval = 0;
42174 +
42175 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
42176 + if (*flags & MF_PAX_SEGMEXEC)
42177 + {
42178 + *flags &= ~MF_PAX_SEGMEXEC;
42179 + retval = -EINVAL;
42180 + }
42181 +#endif
42182 +
42183 + if ((*flags & MF_PAX_PAGEEXEC)
42184 +
42185 +#ifdef CONFIG_PAX_PAGEEXEC
42186 + && (*flags & MF_PAX_SEGMEXEC)
42187 +#endif
42188 +
42189 + )
42190 + {
42191 + *flags &= ~MF_PAX_PAGEEXEC;
42192 + retval = -EINVAL;
42193 + }
42194 +
42195 + if ((*flags & MF_PAX_MPROTECT)
42196 +
42197 +#ifdef CONFIG_PAX_MPROTECT
42198 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42199 +#endif
42200 +
42201 + )
42202 + {
42203 + *flags &= ~MF_PAX_MPROTECT;
42204 + retval = -EINVAL;
42205 + }
42206 +
42207 + if ((*flags & MF_PAX_EMUTRAMP)
42208 +
42209 +#ifdef CONFIG_PAX_EMUTRAMP
42210 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42211 +#endif
42212 +
42213 + )
42214 + {
42215 + *flags &= ~MF_PAX_EMUTRAMP;
42216 + retval = -EINVAL;
42217 + }
42218 +
42219 + return retval;
42220 +}
42221 +
42222 +EXPORT_SYMBOL(pax_check_flags);
42223 +
42224 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42225 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
42226 +{
42227 + struct task_struct *tsk = current;
42228 + struct mm_struct *mm = current->mm;
42229 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
42230 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
42231 + char *path_exec = NULL;
42232 + char *path_fault = NULL;
42233 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
42234 +
42235 + if (buffer_exec && buffer_fault) {
42236 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
42237 +
42238 + down_read(&mm->mmap_sem);
42239 + vma = mm->mmap;
42240 + while (vma && (!vma_exec || !vma_fault)) {
42241 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
42242 + vma_exec = vma;
42243 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
42244 + vma_fault = vma;
42245 + vma = vma->vm_next;
42246 + }
42247 + if (vma_exec) {
42248 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
42249 + if (IS_ERR(path_exec))
42250 + path_exec = "<path too long>";
42251 + else {
42252 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
42253 + if (path_exec) {
42254 + *path_exec = 0;
42255 + path_exec = buffer_exec;
42256 + } else
42257 + path_exec = "<path too long>";
42258 + }
42259 + }
42260 + if (vma_fault) {
42261 + start = vma_fault->vm_start;
42262 + end = vma_fault->vm_end;
42263 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
42264 + if (vma_fault->vm_file) {
42265 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
42266 + if (IS_ERR(path_fault))
42267 + path_fault = "<path too long>";
42268 + else {
42269 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
42270 + if (path_fault) {
42271 + *path_fault = 0;
42272 + path_fault = buffer_fault;
42273 + } else
42274 + path_fault = "<path too long>";
42275 + }
42276 + } else
42277 + path_fault = "<anonymous mapping>";
42278 + }
42279 + up_read(&mm->mmap_sem);
42280 + }
42281 + if (tsk->signal->curr_ip)
42282 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
42283 + else
42284 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
42285 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
42286 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
42287 + task_uid(tsk), task_euid(tsk), pc, sp);
42288 + free_page((unsigned long)buffer_exec);
42289 + free_page((unsigned long)buffer_fault);
42290 + pax_report_insns(regs, pc, sp);
42291 + do_coredump(SIGKILL, SIGKILL, regs);
42292 +}
42293 +#endif
42294 +
42295 +#ifdef CONFIG_PAX_REFCOUNT
42296 +void pax_report_refcount_overflow(struct pt_regs *regs)
42297 +{
42298 + if (current->signal->curr_ip)
42299 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42300 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
42301 + else
42302 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42303 + current->comm, task_pid_nr(current), current_uid(), current_euid());
42304 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
42305 + show_regs(regs);
42306 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
42307 +}
42308 +#endif
42309 +
42310 +#ifdef CONFIG_PAX_USERCOPY
42311 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
42312 +int object_is_on_stack(const void *obj, unsigned long len)
42313 +{
42314 + const void * const stack = task_stack_page(current);
42315 + const void * const stackend = stack + THREAD_SIZE;
42316 +
42317 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42318 + const void *frame = NULL;
42319 + const void *oldframe;
42320 +#endif
42321 +
42322 + if (obj + len < obj)
42323 + return -1;
42324 +
42325 + if (obj + len <= stack || stackend <= obj)
42326 + return 0;
42327 +
42328 + if (obj < stack || stackend < obj + len)
42329 + return -1;
42330 +
42331 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42332 + oldframe = __builtin_frame_address(1);
42333 + if (oldframe)
42334 + frame = __builtin_frame_address(2);
42335 + /*
42336 + low ----------------------------------------------> high
42337 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
42338 + ^----------------^
42339 + allow copies only within here
42340 + */
42341 + while (stack <= frame && frame < stackend) {
42342 + /* if obj + len extends past the last frame, this
42343 + check won't pass and the next frame will be 0,
42344 + causing us to bail out and correctly report
42345 + the copy as invalid
42346 + */
42347 + if (obj + len <= frame)
42348 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
42349 + oldframe = frame;
42350 + frame = *(const void * const *)frame;
42351 + }
42352 + return -1;
42353 +#else
42354 + return 1;
42355 +#endif
42356 +}
42357 +
42358 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
42359 +{
42360 + if (current->signal->curr_ip)
42361 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42362 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42363 + else
42364 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42365 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42366 + dump_stack();
42367 + gr_handle_kernel_exploit();
42368 + do_group_exit(SIGKILL);
42369 +}
42370 +#endif
42371 +
42372 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
42373 +void pax_track_stack(void)
42374 +{
42375 + unsigned long sp = (unsigned long)&sp;
42376 + if (sp < current_thread_info()->lowest_stack &&
42377 + sp > (unsigned long)task_stack_page(current))
42378 + current_thread_info()->lowest_stack = sp;
42379 +}
42380 +EXPORT_SYMBOL(pax_track_stack);
42381 +#endif
42382 +
42383 static int zap_process(struct task_struct *start, int exit_code)
42384 {
42385 struct task_struct *t;
42386 @@ -2023,17 +2337,17 @@ static void wait_for_dump_helpers(struct file *file)
42387 pipe = file->f_path.dentry->d_inode->i_pipe;
42388
42389 pipe_lock(pipe);
42390 - pipe->readers++;
42391 - pipe->writers--;
42392 + atomic_inc(&pipe->readers);
42393 + atomic_dec(&pipe->writers);
42394
42395 - while ((pipe->readers > 1) && (!signal_pending(current))) {
42396 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
42397 wake_up_interruptible_sync(&pipe->wait);
42398 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42399 pipe_wait(pipe);
42400 }
42401
42402 - pipe->readers--;
42403 - pipe->writers++;
42404 + atomic_dec(&pipe->readers);
42405 + atomic_inc(&pipe->writers);
42406 pipe_unlock(pipe);
42407
42408 }
42409 @@ -2094,7 +2408,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42410 int retval = 0;
42411 int flag = 0;
42412 int ispipe;
42413 - static atomic_t core_dump_count = ATOMIC_INIT(0);
42414 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
42415 struct coredump_params cprm = {
42416 .signr = signr,
42417 .regs = regs,
42418 @@ -2109,6 +2423,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42419
42420 audit_core_dumps(signr);
42421
42422 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
42423 + gr_handle_brute_attach(current, cprm.mm_flags);
42424 +
42425 binfmt = mm->binfmt;
42426 if (!binfmt || !binfmt->core_dump)
42427 goto fail;
42428 @@ -2176,7 +2493,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42429 }
42430 cprm.limit = RLIM_INFINITY;
42431
42432 - dump_count = atomic_inc_return(&core_dump_count);
42433 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
42434 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
42435 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
42436 task_tgid_vnr(current), current->comm);
42437 @@ -2203,6 +2520,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42438 } else {
42439 struct inode *inode;
42440
42441 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42442 +
42443 if (cprm.limit < binfmt->min_coredump)
42444 goto fail_unlock;
42445
42446 @@ -2246,7 +2565,7 @@ close_fail:
42447 filp_close(cprm.file, NULL);
42448 fail_dropcount:
42449 if (ispipe)
42450 - atomic_dec(&core_dump_count);
42451 + atomic_dec_unchecked(&core_dump_count);
42452 fail_unlock:
42453 kfree(cn.corename);
42454 fail_corename:
42455 @@ -2265,7 +2584,7 @@ fail:
42456 */
42457 int dump_write(struct file *file, const void *addr, int nr)
42458 {
42459 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42460 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42461 }
42462 EXPORT_SYMBOL(dump_write);
42463
42464 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
42465 index a8cbe1b..fed04cb 100644
42466 --- a/fs/ext2/balloc.c
42467 +++ b/fs/ext2/balloc.c
42468 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
42469
42470 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42471 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42472 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42473 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42474 sbi->s_resuid != current_fsuid() &&
42475 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42476 return 0;
42477 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
42478 index a203892..4e64db5 100644
42479 --- a/fs/ext3/balloc.c
42480 +++ b/fs/ext3/balloc.c
42481 @@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
42482
42483 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42484 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42485 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42486 + if (free_blocks < root_blocks + 1 &&
42487 !use_reservation && sbi->s_resuid != current_fsuid() &&
42488 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42489 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
42490 + !capable_nolog(CAP_SYS_RESOURCE)) {
42491 return 0;
42492 }
42493 return 1;
42494 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
42495 index 12ccacd..a6035fce0 100644
42496 --- a/fs/ext4/balloc.c
42497 +++ b/fs/ext4/balloc.c
42498 @@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
42499 /* Hm, nope. Are (enough) root reserved clusters available? */
42500 if (sbi->s_resuid == current_fsuid() ||
42501 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42502 - capable(CAP_SYS_RESOURCE) ||
42503 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42504 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42505 + capable_nolog(CAP_SYS_RESOURCE)) {
42506
42507 if (free_clusters >= (nclusters + dirty_clusters))
42508 return 1;
42509 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
42510 index 5b0e26a..0aa002d 100644
42511 --- a/fs/ext4/ext4.h
42512 +++ b/fs/ext4/ext4.h
42513 @@ -1208,19 +1208,19 @@ struct ext4_sb_info {
42514 unsigned long s_mb_last_start;
42515
42516 /* stats for buddy allocator */
42517 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42518 - atomic_t s_bal_success; /* we found long enough chunks */
42519 - atomic_t s_bal_allocated; /* in blocks */
42520 - atomic_t s_bal_ex_scanned; /* total extents scanned */
42521 - atomic_t s_bal_goals; /* goal hits */
42522 - atomic_t s_bal_breaks; /* too long searches */
42523 - atomic_t s_bal_2orders; /* 2^order hits */
42524 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42525 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42526 + atomic_unchecked_t s_bal_allocated; /* in blocks */
42527 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42528 + atomic_unchecked_t s_bal_goals; /* goal hits */
42529 + atomic_unchecked_t s_bal_breaks; /* too long searches */
42530 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42531 spinlock_t s_bal_lock;
42532 unsigned long s_mb_buddies_generated;
42533 unsigned long long s_mb_generation_time;
42534 - atomic_t s_mb_lost_chunks;
42535 - atomic_t s_mb_preallocated;
42536 - atomic_t s_mb_discarded;
42537 + atomic_unchecked_t s_mb_lost_chunks;
42538 + atomic_unchecked_t s_mb_preallocated;
42539 + atomic_unchecked_t s_mb_discarded;
42540 atomic_t s_lock_busy;
42541
42542 /* locality groups */
42543 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
42544 index e2d8be8..c7f0ce9 100644
42545 --- a/fs/ext4/mballoc.c
42546 +++ b/fs/ext4/mballoc.c
42547 @@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
42548 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42549
42550 if (EXT4_SB(sb)->s_mb_stats)
42551 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42552 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42553
42554 break;
42555 }
42556 @@ -2088,7 +2088,7 @@ repeat:
42557 ac->ac_status = AC_STATUS_CONTINUE;
42558 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42559 cr = 3;
42560 - atomic_inc(&sbi->s_mb_lost_chunks);
42561 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42562 goto repeat;
42563 }
42564 }
42565 @@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
42566 if (sbi->s_mb_stats) {
42567 ext4_msg(sb, KERN_INFO,
42568 "mballoc: %u blocks %u reqs (%u success)",
42569 - atomic_read(&sbi->s_bal_allocated),
42570 - atomic_read(&sbi->s_bal_reqs),
42571 - atomic_read(&sbi->s_bal_success));
42572 + atomic_read_unchecked(&sbi->s_bal_allocated),
42573 + atomic_read_unchecked(&sbi->s_bal_reqs),
42574 + atomic_read_unchecked(&sbi->s_bal_success));
42575 ext4_msg(sb, KERN_INFO,
42576 "mballoc: %u extents scanned, %u goal hits, "
42577 "%u 2^N hits, %u breaks, %u lost",
42578 - atomic_read(&sbi->s_bal_ex_scanned),
42579 - atomic_read(&sbi->s_bal_goals),
42580 - atomic_read(&sbi->s_bal_2orders),
42581 - atomic_read(&sbi->s_bal_breaks),
42582 - atomic_read(&sbi->s_mb_lost_chunks));
42583 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42584 + atomic_read_unchecked(&sbi->s_bal_goals),
42585 + atomic_read_unchecked(&sbi->s_bal_2orders),
42586 + atomic_read_unchecked(&sbi->s_bal_breaks),
42587 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42588 ext4_msg(sb, KERN_INFO,
42589 "mballoc: %lu generated and it took %Lu",
42590 sbi->s_mb_buddies_generated,
42591 sbi->s_mb_generation_time);
42592 ext4_msg(sb, KERN_INFO,
42593 "mballoc: %u preallocated, %u discarded",
42594 - atomic_read(&sbi->s_mb_preallocated),
42595 - atomic_read(&sbi->s_mb_discarded));
42596 + atomic_read_unchecked(&sbi->s_mb_preallocated),
42597 + atomic_read_unchecked(&sbi->s_mb_discarded));
42598 }
42599
42600 free_percpu(sbi->s_locality_groups);
42601 @@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
42602 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42603
42604 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42605 - atomic_inc(&sbi->s_bal_reqs);
42606 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42607 + atomic_inc_unchecked(&sbi->s_bal_reqs);
42608 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42609 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42610 - atomic_inc(&sbi->s_bal_success);
42611 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42612 + atomic_inc_unchecked(&sbi->s_bal_success);
42613 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42614 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42615 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42616 - atomic_inc(&sbi->s_bal_goals);
42617 + atomic_inc_unchecked(&sbi->s_bal_goals);
42618 if (ac->ac_found > sbi->s_mb_max_to_scan)
42619 - atomic_inc(&sbi->s_bal_breaks);
42620 + atomic_inc_unchecked(&sbi->s_bal_breaks);
42621 }
42622
42623 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42624 @@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
42625 trace_ext4_mb_new_inode_pa(ac, pa);
42626
42627 ext4_mb_use_inode_pa(ac, pa);
42628 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
42629 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
42630
42631 ei = EXT4_I(ac->ac_inode);
42632 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42633 @@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
42634 trace_ext4_mb_new_group_pa(ac, pa);
42635
42636 ext4_mb_use_group_pa(ac, pa);
42637 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42638 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42639
42640 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42641 lg = ac->ac_lg;
42642 @@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
42643 * from the bitmap and continue.
42644 */
42645 }
42646 - atomic_add(free, &sbi->s_mb_discarded);
42647 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
42648
42649 return err;
42650 }
42651 @@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
42652 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42653 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42654 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42655 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42656 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42657 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42658
42659 return 0;
42660 diff --git a/fs/fcntl.c b/fs/fcntl.c
42661 index 22764c7..86372c9 100644
42662 --- a/fs/fcntl.c
42663 +++ b/fs/fcntl.c
42664 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
42665 if (err)
42666 return err;
42667
42668 + if (gr_handle_chroot_fowner(pid, type))
42669 + return -ENOENT;
42670 + if (gr_check_protected_task_fowner(pid, type))
42671 + return -EACCES;
42672 +
42673 f_modown(filp, pid, type, force);
42674 return 0;
42675 }
42676 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42677
42678 static int f_setown_ex(struct file *filp, unsigned long arg)
42679 {
42680 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42681 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42682 struct f_owner_ex owner;
42683 struct pid *pid;
42684 int type;
42685 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
42686
42687 static int f_getown_ex(struct file *filp, unsigned long arg)
42688 {
42689 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42690 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42691 struct f_owner_ex owner;
42692 int ret = 0;
42693
42694 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
42695 switch (cmd) {
42696 case F_DUPFD:
42697 case F_DUPFD_CLOEXEC:
42698 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42699 if (arg >= rlimit(RLIMIT_NOFILE))
42700 break;
42701 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42702 diff --git a/fs/fifo.c b/fs/fifo.c
42703 index b1a524d..4ee270e 100644
42704 --- a/fs/fifo.c
42705 +++ b/fs/fifo.c
42706 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
42707 */
42708 filp->f_op = &read_pipefifo_fops;
42709 pipe->r_counter++;
42710 - if (pipe->readers++ == 0)
42711 + if (atomic_inc_return(&pipe->readers) == 1)
42712 wake_up_partner(inode);
42713
42714 - if (!pipe->writers) {
42715 + if (!atomic_read(&pipe->writers)) {
42716 if ((filp->f_flags & O_NONBLOCK)) {
42717 /* suppress POLLHUP until we have
42718 * seen a writer */
42719 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
42720 * errno=ENXIO when there is no process reading the FIFO.
42721 */
42722 ret = -ENXIO;
42723 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42724 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42725 goto err;
42726
42727 filp->f_op = &write_pipefifo_fops;
42728 pipe->w_counter++;
42729 - if (!pipe->writers++)
42730 + if (atomic_inc_return(&pipe->writers) == 1)
42731 wake_up_partner(inode);
42732
42733 - if (!pipe->readers) {
42734 + if (!atomic_read(&pipe->readers)) {
42735 wait_for_partner(inode, &pipe->r_counter);
42736 if (signal_pending(current))
42737 goto err_wr;
42738 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
42739 */
42740 filp->f_op = &rdwr_pipefifo_fops;
42741
42742 - pipe->readers++;
42743 - pipe->writers++;
42744 + atomic_inc(&pipe->readers);
42745 + atomic_inc(&pipe->writers);
42746 pipe->r_counter++;
42747 pipe->w_counter++;
42748 - if (pipe->readers == 1 || pipe->writers == 1)
42749 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42750 wake_up_partner(inode);
42751 break;
42752
42753 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
42754 return 0;
42755
42756 err_rd:
42757 - if (!--pipe->readers)
42758 + if (atomic_dec_and_test(&pipe->readers))
42759 wake_up_interruptible(&pipe->wait);
42760 ret = -ERESTARTSYS;
42761 goto err;
42762
42763 err_wr:
42764 - if (!--pipe->writers)
42765 + if (atomic_dec_and_test(&pipe->writers))
42766 wake_up_interruptible(&pipe->wait);
42767 ret = -ERESTARTSYS;
42768 goto err;
42769
42770 err:
42771 - if (!pipe->readers && !pipe->writers)
42772 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42773 free_pipe_info(inode);
42774
42775 err_nocleanup:
42776 diff --git a/fs/file.c b/fs/file.c
42777 index 4c6992d..104cdea 100644
42778 --- a/fs/file.c
42779 +++ b/fs/file.c
42780 @@ -15,6 +15,7 @@
42781 #include <linux/slab.h>
42782 #include <linux/vmalloc.h>
42783 #include <linux/file.h>
42784 +#include <linux/security.h>
42785 #include <linux/fdtable.h>
42786 #include <linux/bitops.h>
42787 #include <linux/interrupt.h>
42788 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
42789 * N.B. For clone tasks sharing a files structure, this test
42790 * will limit the total number of files that can be opened.
42791 */
42792 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42793 if (nr >= rlimit(RLIMIT_NOFILE))
42794 return -EMFILE;
42795
42796 diff --git a/fs/filesystems.c b/fs/filesystems.c
42797 index 0845f84..7b4ebef 100644
42798 --- a/fs/filesystems.c
42799 +++ b/fs/filesystems.c
42800 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
42801 int len = dot ? dot - name : strlen(name);
42802
42803 fs = __get_fs_type(name, len);
42804 +
42805 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
42806 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42807 +#else
42808 if (!fs && (request_module("%.*s", len, name) == 0))
42809 +#endif
42810 fs = __get_fs_type(name, len);
42811
42812 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42813 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
42814 index 78b519c..a8b4979 100644
42815 --- a/fs/fs_struct.c
42816 +++ b/fs/fs_struct.c
42817 @@ -4,6 +4,7 @@
42818 #include <linux/path.h>
42819 #include <linux/slab.h>
42820 #include <linux/fs_struct.h>
42821 +#include <linux/grsecurity.h>
42822 #include "internal.h"
42823
42824 static inline void path_get_longterm(struct path *path)
42825 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
42826 old_root = fs->root;
42827 fs->root = *path;
42828 path_get_longterm(path);
42829 + gr_set_chroot_entries(current, path);
42830 write_seqcount_end(&fs->seq);
42831 spin_unlock(&fs->lock);
42832 if (old_root.dentry)
42833 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
42834 && fs->root.mnt == old_root->mnt) {
42835 path_get_longterm(new_root);
42836 fs->root = *new_root;
42837 + gr_set_chroot_entries(p, new_root);
42838 count++;
42839 }
42840 if (fs->pwd.dentry == old_root->dentry
42841 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
42842 spin_lock(&fs->lock);
42843 write_seqcount_begin(&fs->seq);
42844 tsk->fs = NULL;
42845 - kill = !--fs->users;
42846 + gr_clear_chroot_entries(tsk);
42847 + kill = !atomic_dec_return(&fs->users);
42848 write_seqcount_end(&fs->seq);
42849 spin_unlock(&fs->lock);
42850 task_unlock(tsk);
42851 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42852 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42853 /* We don't need to lock fs - think why ;-) */
42854 if (fs) {
42855 - fs->users = 1;
42856 + atomic_set(&fs->users, 1);
42857 fs->in_exec = 0;
42858 spin_lock_init(&fs->lock);
42859 seqcount_init(&fs->seq);
42860 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42861 spin_lock(&old->lock);
42862 fs->root = old->root;
42863 path_get_longterm(&fs->root);
42864 + /* instead of calling gr_set_chroot_entries here,
42865 + we call it from every caller of this function
42866 + */
42867 fs->pwd = old->pwd;
42868 path_get_longterm(&fs->pwd);
42869 spin_unlock(&old->lock);
42870 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
42871
42872 task_lock(current);
42873 spin_lock(&fs->lock);
42874 - kill = !--fs->users;
42875 + kill = !atomic_dec_return(&fs->users);
42876 current->fs = new_fs;
42877 + gr_set_chroot_entries(current, &new_fs->root);
42878 spin_unlock(&fs->lock);
42879 task_unlock(current);
42880
42881 @@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
42882
42883 int current_umask(void)
42884 {
42885 - return current->fs->umask;
42886 + return current->fs->umask | gr_acl_umask();
42887 }
42888 EXPORT_SYMBOL(current_umask);
42889
42890 /* to be mentioned only in INIT_TASK */
42891 struct fs_struct init_fs = {
42892 - .users = 1,
42893 + .users = ATOMIC_INIT(1),
42894 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
42895 .seq = SEQCNT_ZERO,
42896 .umask = 0022,
42897 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
42898 task_lock(current);
42899
42900 spin_lock(&init_fs.lock);
42901 - init_fs.users++;
42902 + atomic_inc(&init_fs.users);
42903 spin_unlock(&init_fs.lock);
42904
42905 spin_lock(&fs->lock);
42906 current->fs = &init_fs;
42907 - kill = !--fs->users;
42908 + gr_set_chroot_entries(current, &current->fs->root);
42909 + kill = !atomic_dec_return(&fs->users);
42910 spin_unlock(&fs->lock);
42911
42912 task_unlock(current);
42913 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
42914 index 9905350..02eaec4 100644
42915 --- a/fs/fscache/cookie.c
42916 +++ b/fs/fscache/cookie.c
42917 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
42918 parent ? (char *) parent->def->name : "<no-parent>",
42919 def->name, netfs_data);
42920
42921 - fscache_stat(&fscache_n_acquires);
42922 + fscache_stat_unchecked(&fscache_n_acquires);
42923
42924 /* if there's no parent cookie, then we don't create one here either */
42925 if (!parent) {
42926 - fscache_stat(&fscache_n_acquires_null);
42927 + fscache_stat_unchecked(&fscache_n_acquires_null);
42928 _leave(" [no parent]");
42929 return NULL;
42930 }
42931 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
42932 /* allocate and initialise a cookie */
42933 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42934 if (!cookie) {
42935 - fscache_stat(&fscache_n_acquires_oom);
42936 + fscache_stat_unchecked(&fscache_n_acquires_oom);
42937 _leave(" [ENOMEM]");
42938 return NULL;
42939 }
42940 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42941
42942 switch (cookie->def->type) {
42943 case FSCACHE_COOKIE_TYPE_INDEX:
42944 - fscache_stat(&fscache_n_cookie_index);
42945 + fscache_stat_unchecked(&fscache_n_cookie_index);
42946 break;
42947 case FSCACHE_COOKIE_TYPE_DATAFILE:
42948 - fscache_stat(&fscache_n_cookie_data);
42949 + fscache_stat_unchecked(&fscache_n_cookie_data);
42950 break;
42951 default:
42952 - fscache_stat(&fscache_n_cookie_special);
42953 + fscache_stat_unchecked(&fscache_n_cookie_special);
42954 break;
42955 }
42956
42957 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42958 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42959 atomic_dec(&parent->n_children);
42960 __fscache_cookie_put(cookie);
42961 - fscache_stat(&fscache_n_acquires_nobufs);
42962 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42963 _leave(" = NULL");
42964 return NULL;
42965 }
42966 }
42967
42968 - fscache_stat(&fscache_n_acquires_ok);
42969 + fscache_stat_unchecked(&fscache_n_acquires_ok);
42970 _leave(" = %p", cookie);
42971 return cookie;
42972 }
42973 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
42974 cache = fscache_select_cache_for_object(cookie->parent);
42975 if (!cache) {
42976 up_read(&fscache_addremove_sem);
42977 - fscache_stat(&fscache_n_acquires_no_cache);
42978 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42979 _leave(" = -ENOMEDIUM [no cache]");
42980 return -ENOMEDIUM;
42981 }
42982 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
42983 object = cache->ops->alloc_object(cache, cookie);
42984 fscache_stat_d(&fscache_n_cop_alloc_object);
42985 if (IS_ERR(object)) {
42986 - fscache_stat(&fscache_n_object_no_alloc);
42987 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
42988 ret = PTR_ERR(object);
42989 goto error;
42990 }
42991
42992 - fscache_stat(&fscache_n_object_alloc);
42993 + fscache_stat_unchecked(&fscache_n_object_alloc);
42994
42995 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42996
42997 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
42998 struct fscache_object *object;
42999 struct hlist_node *_p;
43000
43001 - fscache_stat(&fscache_n_updates);
43002 + fscache_stat_unchecked(&fscache_n_updates);
43003
43004 if (!cookie) {
43005 - fscache_stat(&fscache_n_updates_null);
43006 + fscache_stat_unchecked(&fscache_n_updates_null);
43007 _leave(" [no cookie]");
43008 return;
43009 }
43010 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
43011 struct fscache_object *object;
43012 unsigned long event;
43013
43014 - fscache_stat(&fscache_n_relinquishes);
43015 + fscache_stat_unchecked(&fscache_n_relinquishes);
43016 if (retire)
43017 - fscache_stat(&fscache_n_relinquishes_retire);
43018 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
43019
43020 if (!cookie) {
43021 - fscache_stat(&fscache_n_relinquishes_null);
43022 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
43023 _leave(" [no cookie]");
43024 return;
43025 }
43026 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
43027
43028 /* wait for the cookie to finish being instantiated (or to fail) */
43029 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
43030 - fscache_stat(&fscache_n_relinquishes_waitcrt);
43031 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
43032 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
43033 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
43034 }
43035 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
43036 index f6aad48..88dcf26 100644
43037 --- a/fs/fscache/internal.h
43038 +++ b/fs/fscache/internal.h
43039 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
43040 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
43041 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
43042
43043 -extern atomic_t fscache_n_op_pend;
43044 -extern atomic_t fscache_n_op_run;
43045 -extern atomic_t fscache_n_op_enqueue;
43046 -extern atomic_t fscache_n_op_deferred_release;
43047 -extern atomic_t fscache_n_op_release;
43048 -extern atomic_t fscache_n_op_gc;
43049 -extern atomic_t fscache_n_op_cancelled;
43050 -extern atomic_t fscache_n_op_rejected;
43051 +extern atomic_unchecked_t fscache_n_op_pend;
43052 +extern atomic_unchecked_t fscache_n_op_run;
43053 +extern atomic_unchecked_t fscache_n_op_enqueue;
43054 +extern atomic_unchecked_t fscache_n_op_deferred_release;
43055 +extern atomic_unchecked_t fscache_n_op_release;
43056 +extern atomic_unchecked_t fscache_n_op_gc;
43057 +extern atomic_unchecked_t fscache_n_op_cancelled;
43058 +extern atomic_unchecked_t fscache_n_op_rejected;
43059
43060 -extern atomic_t fscache_n_attr_changed;
43061 -extern atomic_t fscache_n_attr_changed_ok;
43062 -extern atomic_t fscache_n_attr_changed_nobufs;
43063 -extern atomic_t fscache_n_attr_changed_nomem;
43064 -extern atomic_t fscache_n_attr_changed_calls;
43065 +extern atomic_unchecked_t fscache_n_attr_changed;
43066 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
43067 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
43068 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
43069 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
43070
43071 -extern atomic_t fscache_n_allocs;
43072 -extern atomic_t fscache_n_allocs_ok;
43073 -extern atomic_t fscache_n_allocs_wait;
43074 -extern atomic_t fscache_n_allocs_nobufs;
43075 -extern atomic_t fscache_n_allocs_intr;
43076 -extern atomic_t fscache_n_allocs_object_dead;
43077 -extern atomic_t fscache_n_alloc_ops;
43078 -extern atomic_t fscache_n_alloc_op_waits;
43079 +extern atomic_unchecked_t fscache_n_allocs;
43080 +extern atomic_unchecked_t fscache_n_allocs_ok;
43081 +extern atomic_unchecked_t fscache_n_allocs_wait;
43082 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
43083 +extern atomic_unchecked_t fscache_n_allocs_intr;
43084 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
43085 +extern atomic_unchecked_t fscache_n_alloc_ops;
43086 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
43087
43088 -extern atomic_t fscache_n_retrievals;
43089 -extern atomic_t fscache_n_retrievals_ok;
43090 -extern atomic_t fscache_n_retrievals_wait;
43091 -extern atomic_t fscache_n_retrievals_nodata;
43092 -extern atomic_t fscache_n_retrievals_nobufs;
43093 -extern atomic_t fscache_n_retrievals_intr;
43094 -extern atomic_t fscache_n_retrievals_nomem;
43095 -extern atomic_t fscache_n_retrievals_object_dead;
43096 -extern atomic_t fscache_n_retrieval_ops;
43097 -extern atomic_t fscache_n_retrieval_op_waits;
43098 +extern atomic_unchecked_t fscache_n_retrievals;
43099 +extern atomic_unchecked_t fscache_n_retrievals_ok;
43100 +extern atomic_unchecked_t fscache_n_retrievals_wait;
43101 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
43102 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
43103 +extern atomic_unchecked_t fscache_n_retrievals_intr;
43104 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
43105 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
43106 +extern atomic_unchecked_t fscache_n_retrieval_ops;
43107 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
43108
43109 -extern atomic_t fscache_n_stores;
43110 -extern atomic_t fscache_n_stores_ok;
43111 -extern atomic_t fscache_n_stores_again;
43112 -extern atomic_t fscache_n_stores_nobufs;
43113 -extern atomic_t fscache_n_stores_oom;
43114 -extern atomic_t fscache_n_store_ops;
43115 -extern atomic_t fscache_n_store_calls;
43116 -extern atomic_t fscache_n_store_pages;
43117 -extern atomic_t fscache_n_store_radix_deletes;
43118 -extern atomic_t fscache_n_store_pages_over_limit;
43119 +extern atomic_unchecked_t fscache_n_stores;
43120 +extern atomic_unchecked_t fscache_n_stores_ok;
43121 +extern atomic_unchecked_t fscache_n_stores_again;
43122 +extern atomic_unchecked_t fscache_n_stores_nobufs;
43123 +extern atomic_unchecked_t fscache_n_stores_oom;
43124 +extern atomic_unchecked_t fscache_n_store_ops;
43125 +extern atomic_unchecked_t fscache_n_store_calls;
43126 +extern atomic_unchecked_t fscache_n_store_pages;
43127 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
43128 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
43129
43130 -extern atomic_t fscache_n_store_vmscan_not_storing;
43131 -extern atomic_t fscache_n_store_vmscan_gone;
43132 -extern atomic_t fscache_n_store_vmscan_busy;
43133 -extern atomic_t fscache_n_store_vmscan_cancelled;
43134 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43135 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
43136 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
43137 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43138
43139 -extern atomic_t fscache_n_marks;
43140 -extern atomic_t fscache_n_uncaches;
43141 +extern atomic_unchecked_t fscache_n_marks;
43142 +extern atomic_unchecked_t fscache_n_uncaches;
43143
43144 -extern atomic_t fscache_n_acquires;
43145 -extern atomic_t fscache_n_acquires_null;
43146 -extern atomic_t fscache_n_acquires_no_cache;
43147 -extern atomic_t fscache_n_acquires_ok;
43148 -extern atomic_t fscache_n_acquires_nobufs;
43149 -extern atomic_t fscache_n_acquires_oom;
43150 +extern atomic_unchecked_t fscache_n_acquires;
43151 +extern atomic_unchecked_t fscache_n_acquires_null;
43152 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
43153 +extern atomic_unchecked_t fscache_n_acquires_ok;
43154 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
43155 +extern atomic_unchecked_t fscache_n_acquires_oom;
43156
43157 -extern atomic_t fscache_n_updates;
43158 -extern atomic_t fscache_n_updates_null;
43159 -extern atomic_t fscache_n_updates_run;
43160 +extern atomic_unchecked_t fscache_n_updates;
43161 +extern atomic_unchecked_t fscache_n_updates_null;
43162 +extern atomic_unchecked_t fscache_n_updates_run;
43163
43164 -extern atomic_t fscache_n_relinquishes;
43165 -extern atomic_t fscache_n_relinquishes_null;
43166 -extern atomic_t fscache_n_relinquishes_waitcrt;
43167 -extern atomic_t fscache_n_relinquishes_retire;
43168 +extern atomic_unchecked_t fscache_n_relinquishes;
43169 +extern atomic_unchecked_t fscache_n_relinquishes_null;
43170 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43171 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
43172
43173 -extern atomic_t fscache_n_cookie_index;
43174 -extern atomic_t fscache_n_cookie_data;
43175 -extern atomic_t fscache_n_cookie_special;
43176 +extern atomic_unchecked_t fscache_n_cookie_index;
43177 +extern atomic_unchecked_t fscache_n_cookie_data;
43178 +extern atomic_unchecked_t fscache_n_cookie_special;
43179
43180 -extern atomic_t fscache_n_object_alloc;
43181 -extern atomic_t fscache_n_object_no_alloc;
43182 -extern atomic_t fscache_n_object_lookups;
43183 -extern atomic_t fscache_n_object_lookups_negative;
43184 -extern atomic_t fscache_n_object_lookups_positive;
43185 -extern atomic_t fscache_n_object_lookups_timed_out;
43186 -extern atomic_t fscache_n_object_created;
43187 -extern atomic_t fscache_n_object_avail;
43188 -extern atomic_t fscache_n_object_dead;
43189 +extern atomic_unchecked_t fscache_n_object_alloc;
43190 +extern atomic_unchecked_t fscache_n_object_no_alloc;
43191 +extern atomic_unchecked_t fscache_n_object_lookups;
43192 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
43193 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
43194 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
43195 +extern atomic_unchecked_t fscache_n_object_created;
43196 +extern atomic_unchecked_t fscache_n_object_avail;
43197 +extern atomic_unchecked_t fscache_n_object_dead;
43198
43199 -extern atomic_t fscache_n_checkaux_none;
43200 -extern atomic_t fscache_n_checkaux_okay;
43201 -extern atomic_t fscache_n_checkaux_update;
43202 -extern atomic_t fscache_n_checkaux_obsolete;
43203 +extern atomic_unchecked_t fscache_n_checkaux_none;
43204 +extern atomic_unchecked_t fscache_n_checkaux_okay;
43205 +extern atomic_unchecked_t fscache_n_checkaux_update;
43206 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
43207
43208 extern atomic_t fscache_n_cop_alloc_object;
43209 extern atomic_t fscache_n_cop_lookup_object;
43210 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
43211 atomic_inc(stat);
43212 }
43213
43214 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
43215 +{
43216 + atomic_inc_unchecked(stat);
43217 +}
43218 +
43219 static inline void fscache_stat_d(atomic_t *stat)
43220 {
43221 atomic_dec(stat);
43222 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
43223
43224 #define __fscache_stat(stat) (NULL)
43225 #define fscache_stat(stat) do {} while (0)
43226 +#define fscache_stat_unchecked(stat) do {} while (0)
43227 #define fscache_stat_d(stat) do {} while (0)
43228 #endif
43229
43230 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
43231 index b6b897c..0ffff9c 100644
43232 --- a/fs/fscache/object.c
43233 +++ b/fs/fscache/object.c
43234 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43235 /* update the object metadata on disk */
43236 case FSCACHE_OBJECT_UPDATING:
43237 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
43238 - fscache_stat(&fscache_n_updates_run);
43239 + fscache_stat_unchecked(&fscache_n_updates_run);
43240 fscache_stat(&fscache_n_cop_update_object);
43241 object->cache->ops->update_object(object);
43242 fscache_stat_d(&fscache_n_cop_update_object);
43243 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43244 spin_lock(&object->lock);
43245 object->state = FSCACHE_OBJECT_DEAD;
43246 spin_unlock(&object->lock);
43247 - fscache_stat(&fscache_n_object_dead);
43248 + fscache_stat_unchecked(&fscache_n_object_dead);
43249 goto terminal_transit;
43250
43251 /* handle the parent cache of this object being withdrawn from
43252 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43253 spin_lock(&object->lock);
43254 object->state = FSCACHE_OBJECT_DEAD;
43255 spin_unlock(&object->lock);
43256 - fscache_stat(&fscache_n_object_dead);
43257 + fscache_stat_unchecked(&fscache_n_object_dead);
43258 goto terminal_transit;
43259
43260 /* complain about the object being woken up once it is
43261 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43262 parent->cookie->def->name, cookie->def->name,
43263 object->cache->tag->name);
43264
43265 - fscache_stat(&fscache_n_object_lookups);
43266 + fscache_stat_unchecked(&fscache_n_object_lookups);
43267 fscache_stat(&fscache_n_cop_lookup_object);
43268 ret = object->cache->ops->lookup_object(object);
43269 fscache_stat_d(&fscache_n_cop_lookup_object);
43270 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43271 if (ret == -ETIMEDOUT) {
43272 /* probably stuck behind another object, so move this one to
43273 * the back of the queue */
43274 - fscache_stat(&fscache_n_object_lookups_timed_out);
43275 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
43276 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43277 }
43278
43279 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
43280
43281 spin_lock(&object->lock);
43282 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43283 - fscache_stat(&fscache_n_object_lookups_negative);
43284 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
43285
43286 /* transit here to allow write requests to begin stacking up
43287 * and read requests to begin returning ENODATA */
43288 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
43289 * result, in which case there may be data available */
43290 spin_lock(&object->lock);
43291 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43292 - fscache_stat(&fscache_n_object_lookups_positive);
43293 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
43294
43295 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
43296
43297 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
43298 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43299 } else {
43300 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
43301 - fscache_stat(&fscache_n_object_created);
43302 + fscache_stat_unchecked(&fscache_n_object_created);
43303
43304 object->state = FSCACHE_OBJECT_AVAILABLE;
43305 spin_unlock(&object->lock);
43306 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
43307 fscache_enqueue_dependents(object);
43308
43309 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
43310 - fscache_stat(&fscache_n_object_avail);
43311 + fscache_stat_unchecked(&fscache_n_object_avail);
43312
43313 _leave("");
43314 }
43315 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43316 enum fscache_checkaux result;
43317
43318 if (!object->cookie->def->check_aux) {
43319 - fscache_stat(&fscache_n_checkaux_none);
43320 + fscache_stat_unchecked(&fscache_n_checkaux_none);
43321 return FSCACHE_CHECKAUX_OKAY;
43322 }
43323
43324 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43325 switch (result) {
43326 /* entry okay as is */
43327 case FSCACHE_CHECKAUX_OKAY:
43328 - fscache_stat(&fscache_n_checkaux_okay);
43329 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
43330 break;
43331
43332 /* entry requires update */
43333 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
43334 - fscache_stat(&fscache_n_checkaux_update);
43335 + fscache_stat_unchecked(&fscache_n_checkaux_update);
43336 break;
43337
43338 /* entry requires deletion */
43339 case FSCACHE_CHECKAUX_OBSOLETE:
43340 - fscache_stat(&fscache_n_checkaux_obsolete);
43341 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
43342 break;
43343
43344 default:
43345 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
43346 index 30afdfa..2256596 100644
43347 --- a/fs/fscache/operation.c
43348 +++ b/fs/fscache/operation.c
43349 @@ -17,7 +17,7 @@
43350 #include <linux/slab.h>
43351 #include "internal.h"
43352
43353 -atomic_t fscache_op_debug_id;
43354 +atomic_unchecked_t fscache_op_debug_id;
43355 EXPORT_SYMBOL(fscache_op_debug_id);
43356
43357 /**
43358 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
43359 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
43360 ASSERTCMP(atomic_read(&op->usage), >, 0);
43361
43362 - fscache_stat(&fscache_n_op_enqueue);
43363 + fscache_stat_unchecked(&fscache_n_op_enqueue);
43364 switch (op->flags & FSCACHE_OP_TYPE) {
43365 case FSCACHE_OP_ASYNC:
43366 _debug("queue async");
43367 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
43368 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
43369 if (op->processor)
43370 fscache_enqueue_operation(op);
43371 - fscache_stat(&fscache_n_op_run);
43372 + fscache_stat_unchecked(&fscache_n_op_run);
43373 }
43374
43375 /*
43376 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43377 if (object->n_ops > 1) {
43378 atomic_inc(&op->usage);
43379 list_add_tail(&op->pend_link, &object->pending_ops);
43380 - fscache_stat(&fscache_n_op_pend);
43381 + fscache_stat_unchecked(&fscache_n_op_pend);
43382 } else if (!list_empty(&object->pending_ops)) {
43383 atomic_inc(&op->usage);
43384 list_add_tail(&op->pend_link, &object->pending_ops);
43385 - fscache_stat(&fscache_n_op_pend);
43386 + fscache_stat_unchecked(&fscache_n_op_pend);
43387 fscache_start_operations(object);
43388 } else {
43389 ASSERTCMP(object->n_in_progress, ==, 0);
43390 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43391 object->n_exclusive++; /* reads and writes must wait */
43392 atomic_inc(&op->usage);
43393 list_add_tail(&op->pend_link, &object->pending_ops);
43394 - fscache_stat(&fscache_n_op_pend);
43395 + fscache_stat_unchecked(&fscache_n_op_pend);
43396 ret = 0;
43397 } else {
43398 /* not allowed to submit ops in any other state */
43399 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
43400 if (object->n_exclusive > 0) {
43401 atomic_inc(&op->usage);
43402 list_add_tail(&op->pend_link, &object->pending_ops);
43403 - fscache_stat(&fscache_n_op_pend);
43404 + fscache_stat_unchecked(&fscache_n_op_pend);
43405 } else if (!list_empty(&object->pending_ops)) {
43406 atomic_inc(&op->usage);
43407 list_add_tail(&op->pend_link, &object->pending_ops);
43408 - fscache_stat(&fscache_n_op_pend);
43409 + fscache_stat_unchecked(&fscache_n_op_pend);
43410 fscache_start_operations(object);
43411 } else {
43412 ASSERTCMP(object->n_exclusive, ==, 0);
43413 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
43414 object->n_ops++;
43415 atomic_inc(&op->usage);
43416 list_add_tail(&op->pend_link, &object->pending_ops);
43417 - fscache_stat(&fscache_n_op_pend);
43418 + fscache_stat_unchecked(&fscache_n_op_pend);
43419 ret = 0;
43420 } else if (object->state == FSCACHE_OBJECT_DYING ||
43421 object->state == FSCACHE_OBJECT_LC_DYING ||
43422 object->state == FSCACHE_OBJECT_WITHDRAWING) {
43423 - fscache_stat(&fscache_n_op_rejected);
43424 + fscache_stat_unchecked(&fscache_n_op_rejected);
43425 ret = -ENOBUFS;
43426 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
43427 fscache_report_unexpected_submission(object, op, ostate);
43428 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
43429
43430 ret = -EBUSY;
43431 if (!list_empty(&op->pend_link)) {
43432 - fscache_stat(&fscache_n_op_cancelled);
43433 + fscache_stat_unchecked(&fscache_n_op_cancelled);
43434 list_del_init(&op->pend_link);
43435 object->n_ops--;
43436 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
43437 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
43438 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
43439 BUG();
43440
43441 - fscache_stat(&fscache_n_op_release);
43442 + fscache_stat_unchecked(&fscache_n_op_release);
43443
43444 if (op->release) {
43445 op->release(op);
43446 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
43447 * lock, and defer it otherwise */
43448 if (!spin_trylock(&object->lock)) {
43449 _debug("defer put");
43450 - fscache_stat(&fscache_n_op_deferred_release);
43451 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
43452
43453 cache = object->cache;
43454 spin_lock(&cache->op_gc_list_lock);
43455 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
43456
43457 _debug("GC DEFERRED REL OBJ%x OP%x",
43458 object->debug_id, op->debug_id);
43459 - fscache_stat(&fscache_n_op_gc);
43460 + fscache_stat_unchecked(&fscache_n_op_gc);
43461
43462 ASSERTCMP(atomic_read(&op->usage), ==, 0);
43463
43464 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
43465 index 3f7a59b..cf196cc 100644
43466 --- a/fs/fscache/page.c
43467 +++ b/fs/fscache/page.c
43468 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43469 val = radix_tree_lookup(&cookie->stores, page->index);
43470 if (!val) {
43471 rcu_read_unlock();
43472 - fscache_stat(&fscache_n_store_vmscan_not_storing);
43473 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
43474 __fscache_uncache_page(cookie, page);
43475 return true;
43476 }
43477 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43478 spin_unlock(&cookie->stores_lock);
43479
43480 if (xpage) {
43481 - fscache_stat(&fscache_n_store_vmscan_cancelled);
43482 - fscache_stat(&fscache_n_store_radix_deletes);
43483 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
43484 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43485 ASSERTCMP(xpage, ==, page);
43486 } else {
43487 - fscache_stat(&fscache_n_store_vmscan_gone);
43488 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
43489 }
43490
43491 wake_up_bit(&cookie->flags, 0);
43492 @@ -107,7 +107,7 @@ page_busy:
43493 /* we might want to wait here, but that could deadlock the allocator as
43494 * the work threads writing to the cache may all end up sleeping
43495 * on memory allocation */
43496 - fscache_stat(&fscache_n_store_vmscan_busy);
43497 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43498 return false;
43499 }
43500 EXPORT_SYMBOL(__fscache_maybe_release_page);
43501 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
43502 FSCACHE_COOKIE_STORING_TAG);
43503 if (!radix_tree_tag_get(&cookie->stores, page->index,
43504 FSCACHE_COOKIE_PENDING_TAG)) {
43505 - fscache_stat(&fscache_n_store_radix_deletes);
43506 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43507 xpage = radix_tree_delete(&cookie->stores, page->index);
43508 }
43509 spin_unlock(&cookie->stores_lock);
43510 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
43511
43512 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43513
43514 - fscache_stat(&fscache_n_attr_changed_calls);
43515 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43516
43517 if (fscache_object_is_active(object)) {
43518 fscache_stat(&fscache_n_cop_attr_changed);
43519 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43520
43521 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43522
43523 - fscache_stat(&fscache_n_attr_changed);
43524 + fscache_stat_unchecked(&fscache_n_attr_changed);
43525
43526 op = kzalloc(sizeof(*op), GFP_KERNEL);
43527 if (!op) {
43528 - fscache_stat(&fscache_n_attr_changed_nomem);
43529 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43530 _leave(" = -ENOMEM");
43531 return -ENOMEM;
43532 }
43533 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43534 if (fscache_submit_exclusive_op(object, op) < 0)
43535 goto nobufs;
43536 spin_unlock(&cookie->lock);
43537 - fscache_stat(&fscache_n_attr_changed_ok);
43538 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43539 fscache_put_operation(op);
43540 _leave(" = 0");
43541 return 0;
43542 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43543 nobufs:
43544 spin_unlock(&cookie->lock);
43545 kfree(op);
43546 - fscache_stat(&fscache_n_attr_changed_nobufs);
43547 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43548 _leave(" = %d", -ENOBUFS);
43549 return -ENOBUFS;
43550 }
43551 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
43552 /* allocate a retrieval operation and attempt to submit it */
43553 op = kzalloc(sizeof(*op), GFP_NOIO);
43554 if (!op) {
43555 - fscache_stat(&fscache_n_retrievals_nomem);
43556 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43557 return NULL;
43558 }
43559
43560 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43561 return 0;
43562 }
43563
43564 - fscache_stat(&fscache_n_retrievals_wait);
43565 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
43566
43567 jif = jiffies;
43568 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43569 fscache_wait_bit_interruptible,
43570 TASK_INTERRUPTIBLE) != 0) {
43571 - fscache_stat(&fscache_n_retrievals_intr);
43572 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43573 _leave(" = -ERESTARTSYS");
43574 return -ERESTARTSYS;
43575 }
43576 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43577 */
43578 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43579 struct fscache_retrieval *op,
43580 - atomic_t *stat_op_waits,
43581 - atomic_t *stat_object_dead)
43582 + atomic_unchecked_t *stat_op_waits,
43583 + atomic_unchecked_t *stat_object_dead)
43584 {
43585 int ret;
43586
43587 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43588 goto check_if_dead;
43589
43590 _debug(">>> WT");
43591 - fscache_stat(stat_op_waits);
43592 + fscache_stat_unchecked(stat_op_waits);
43593 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43594 fscache_wait_bit_interruptible,
43595 TASK_INTERRUPTIBLE) < 0) {
43596 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43597
43598 check_if_dead:
43599 if (unlikely(fscache_object_is_dead(object))) {
43600 - fscache_stat(stat_object_dead);
43601 + fscache_stat_unchecked(stat_object_dead);
43602 return -ENOBUFS;
43603 }
43604 return 0;
43605 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43606
43607 _enter("%p,%p,,,", cookie, page);
43608
43609 - fscache_stat(&fscache_n_retrievals);
43610 + fscache_stat_unchecked(&fscache_n_retrievals);
43611
43612 if (hlist_empty(&cookie->backing_objects))
43613 goto nobufs;
43614 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43615 goto nobufs_unlock;
43616 spin_unlock(&cookie->lock);
43617
43618 - fscache_stat(&fscache_n_retrieval_ops);
43619 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43620
43621 /* pin the netfs read context in case we need to do the actual netfs
43622 * read because we've encountered a cache read failure */
43623 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43624
43625 error:
43626 if (ret == -ENOMEM)
43627 - fscache_stat(&fscache_n_retrievals_nomem);
43628 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43629 else if (ret == -ERESTARTSYS)
43630 - fscache_stat(&fscache_n_retrievals_intr);
43631 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43632 else if (ret == -ENODATA)
43633 - fscache_stat(&fscache_n_retrievals_nodata);
43634 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43635 else if (ret < 0)
43636 - fscache_stat(&fscache_n_retrievals_nobufs);
43637 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43638 else
43639 - fscache_stat(&fscache_n_retrievals_ok);
43640 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43641
43642 fscache_put_retrieval(op);
43643 _leave(" = %d", ret);
43644 @@ -429,7 +429,7 @@ nobufs_unlock:
43645 spin_unlock(&cookie->lock);
43646 kfree(op);
43647 nobufs:
43648 - fscache_stat(&fscache_n_retrievals_nobufs);
43649 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43650 _leave(" = -ENOBUFS");
43651 return -ENOBUFS;
43652 }
43653 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43654
43655 _enter("%p,,%d,,,", cookie, *nr_pages);
43656
43657 - fscache_stat(&fscache_n_retrievals);
43658 + fscache_stat_unchecked(&fscache_n_retrievals);
43659
43660 if (hlist_empty(&cookie->backing_objects))
43661 goto nobufs;
43662 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43663 goto nobufs_unlock;
43664 spin_unlock(&cookie->lock);
43665
43666 - fscache_stat(&fscache_n_retrieval_ops);
43667 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43668
43669 /* pin the netfs read context in case we need to do the actual netfs
43670 * read because we've encountered a cache read failure */
43671 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43672
43673 error:
43674 if (ret == -ENOMEM)
43675 - fscache_stat(&fscache_n_retrievals_nomem);
43676 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43677 else if (ret == -ERESTARTSYS)
43678 - fscache_stat(&fscache_n_retrievals_intr);
43679 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43680 else if (ret == -ENODATA)
43681 - fscache_stat(&fscache_n_retrievals_nodata);
43682 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43683 else if (ret < 0)
43684 - fscache_stat(&fscache_n_retrievals_nobufs);
43685 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43686 else
43687 - fscache_stat(&fscache_n_retrievals_ok);
43688 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43689
43690 fscache_put_retrieval(op);
43691 _leave(" = %d", ret);
43692 @@ -545,7 +545,7 @@ nobufs_unlock:
43693 spin_unlock(&cookie->lock);
43694 kfree(op);
43695 nobufs:
43696 - fscache_stat(&fscache_n_retrievals_nobufs);
43697 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43698 _leave(" = -ENOBUFS");
43699 return -ENOBUFS;
43700 }
43701 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43702
43703 _enter("%p,%p,,,", cookie, page);
43704
43705 - fscache_stat(&fscache_n_allocs);
43706 + fscache_stat_unchecked(&fscache_n_allocs);
43707
43708 if (hlist_empty(&cookie->backing_objects))
43709 goto nobufs;
43710 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43711 goto nobufs_unlock;
43712 spin_unlock(&cookie->lock);
43713
43714 - fscache_stat(&fscache_n_alloc_ops);
43715 + fscache_stat_unchecked(&fscache_n_alloc_ops);
43716
43717 ret = fscache_wait_for_retrieval_activation(
43718 object, op,
43719 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43720
43721 error:
43722 if (ret == -ERESTARTSYS)
43723 - fscache_stat(&fscache_n_allocs_intr);
43724 + fscache_stat_unchecked(&fscache_n_allocs_intr);
43725 else if (ret < 0)
43726 - fscache_stat(&fscache_n_allocs_nobufs);
43727 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43728 else
43729 - fscache_stat(&fscache_n_allocs_ok);
43730 + fscache_stat_unchecked(&fscache_n_allocs_ok);
43731
43732 fscache_put_retrieval(op);
43733 _leave(" = %d", ret);
43734 @@ -625,7 +625,7 @@ nobufs_unlock:
43735 spin_unlock(&cookie->lock);
43736 kfree(op);
43737 nobufs:
43738 - fscache_stat(&fscache_n_allocs_nobufs);
43739 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43740 _leave(" = -ENOBUFS");
43741 return -ENOBUFS;
43742 }
43743 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43744
43745 spin_lock(&cookie->stores_lock);
43746
43747 - fscache_stat(&fscache_n_store_calls);
43748 + fscache_stat_unchecked(&fscache_n_store_calls);
43749
43750 /* find a page to store */
43751 page = NULL;
43752 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43753 page = results[0];
43754 _debug("gang %d [%lx]", n, page->index);
43755 if (page->index > op->store_limit) {
43756 - fscache_stat(&fscache_n_store_pages_over_limit);
43757 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43758 goto superseded;
43759 }
43760
43761 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43762 spin_unlock(&cookie->stores_lock);
43763 spin_unlock(&object->lock);
43764
43765 - fscache_stat(&fscache_n_store_pages);
43766 + fscache_stat_unchecked(&fscache_n_store_pages);
43767 fscache_stat(&fscache_n_cop_write_page);
43768 ret = object->cache->ops->write_page(op, page);
43769 fscache_stat_d(&fscache_n_cop_write_page);
43770 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43771 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43772 ASSERT(PageFsCache(page));
43773
43774 - fscache_stat(&fscache_n_stores);
43775 + fscache_stat_unchecked(&fscache_n_stores);
43776
43777 op = kzalloc(sizeof(*op), GFP_NOIO);
43778 if (!op)
43779 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43780 spin_unlock(&cookie->stores_lock);
43781 spin_unlock(&object->lock);
43782
43783 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43784 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43785 op->store_limit = object->store_limit;
43786
43787 if (fscache_submit_op(object, &op->op) < 0)
43788 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43789
43790 spin_unlock(&cookie->lock);
43791 radix_tree_preload_end();
43792 - fscache_stat(&fscache_n_store_ops);
43793 - fscache_stat(&fscache_n_stores_ok);
43794 + fscache_stat_unchecked(&fscache_n_store_ops);
43795 + fscache_stat_unchecked(&fscache_n_stores_ok);
43796
43797 /* the work queue now carries its own ref on the object */
43798 fscache_put_operation(&op->op);
43799 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43800 return 0;
43801
43802 already_queued:
43803 - fscache_stat(&fscache_n_stores_again);
43804 + fscache_stat_unchecked(&fscache_n_stores_again);
43805 already_pending:
43806 spin_unlock(&cookie->stores_lock);
43807 spin_unlock(&object->lock);
43808 spin_unlock(&cookie->lock);
43809 radix_tree_preload_end();
43810 kfree(op);
43811 - fscache_stat(&fscache_n_stores_ok);
43812 + fscache_stat_unchecked(&fscache_n_stores_ok);
43813 _leave(" = 0");
43814 return 0;
43815
43816 @@ -851,14 +851,14 @@ nobufs:
43817 spin_unlock(&cookie->lock);
43818 radix_tree_preload_end();
43819 kfree(op);
43820 - fscache_stat(&fscache_n_stores_nobufs);
43821 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
43822 _leave(" = -ENOBUFS");
43823 return -ENOBUFS;
43824
43825 nomem_free:
43826 kfree(op);
43827 nomem:
43828 - fscache_stat(&fscache_n_stores_oom);
43829 + fscache_stat_unchecked(&fscache_n_stores_oom);
43830 _leave(" = -ENOMEM");
43831 return -ENOMEM;
43832 }
43833 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
43834 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43835 ASSERTCMP(page, !=, NULL);
43836
43837 - fscache_stat(&fscache_n_uncaches);
43838 + fscache_stat_unchecked(&fscache_n_uncaches);
43839
43840 /* cache withdrawal may beat us to it */
43841 if (!PageFsCache(page))
43842 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
43843 unsigned long loop;
43844
43845 #ifdef CONFIG_FSCACHE_STATS
43846 - atomic_add(pagevec->nr, &fscache_n_marks);
43847 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43848 #endif
43849
43850 for (loop = 0; loop < pagevec->nr; loop++) {
43851 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
43852 index 4765190..2a067f2 100644
43853 --- a/fs/fscache/stats.c
43854 +++ b/fs/fscache/stats.c
43855 @@ -18,95 +18,95 @@
43856 /*
43857 * operation counters
43858 */
43859 -atomic_t fscache_n_op_pend;
43860 -atomic_t fscache_n_op_run;
43861 -atomic_t fscache_n_op_enqueue;
43862 -atomic_t fscache_n_op_requeue;
43863 -atomic_t fscache_n_op_deferred_release;
43864 -atomic_t fscache_n_op_release;
43865 -atomic_t fscache_n_op_gc;
43866 -atomic_t fscache_n_op_cancelled;
43867 -atomic_t fscache_n_op_rejected;
43868 +atomic_unchecked_t fscache_n_op_pend;
43869 +atomic_unchecked_t fscache_n_op_run;
43870 +atomic_unchecked_t fscache_n_op_enqueue;
43871 +atomic_unchecked_t fscache_n_op_requeue;
43872 +atomic_unchecked_t fscache_n_op_deferred_release;
43873 +atomic_unchecked_t fscache_n_op_release;
43874 +atomic_unchecked_t fscache_n_op_gc;
43875 +atomic_unchecked_t fscache_n_op_cancelled;
43876 +atomic_unchecked_t fscache_n_op_rejected;
43877
43878 -atomic_t fscache_n_attr_changed;
43879 -atomic_t fscache_n_attr_changed_ok;
43880 -atomic_t fscache_n_attr_changed_nobufs;
43881 -atomic_t fscache_n_attr_changed_nomem;
43882 -atomic_t fscache_n_attr_changed_calls;
43883 +atomic_unchecked_t fscache_n_attr_changed;
43884 +atomic_unchecked_t fscache_n_attr_changed_ok;
43885 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
43886 +atomic_unchecked_t fscache_n_attr_changed_nomem;
43887 +atomic_unchecked_t fscache_n_attr_changed_calls;
43888
43889 -atomic_t fscache_n_allocs;
43890 -atomic_t fscache_n_allocs_ok;
43891 -atomic_t fscache_n_allocs_wait;
43892 -atomic_t fscache_n_allocs_nobufs;
43893 -atomic_t fscache_n_allocs_intr;
43894 -atomic_t fscache_n_allocs_object_dead;
43895 -atomic_t fscache_n_alloc_ops;
43896 -atomic_t fscache_n_alloc_op_waits;
43897 +atomic_unchecked_t fscache_n_allocs;
43898 +atomic_unchecked_t fscache_n_allocs_ok;
43899 +atomic_unchecked_t fscache_n_allocs_wait;
43900 +atomic_unchecked_t fscache_n_allocs_nobufs;
43901 +atomic_unchecked_t fscache_n_allocs_intr;
43902 +atomic_unchecked_t fscache_n_allocs_object_dead;
43903 +atomic_unchecked_t fscache_n_alloc_ops;
43904 +atomic_unchecked_t fscache_n_alloc_op_waits;
43905
43906 -atomic_t fscache_n_retrievals;
43907 -atomic_t fscache_n_retrievals_ok;
43908 -atomic_t fscache_n_retrievals_wait;
43909 -atomic_t fscache_n_retrievals_nodata;
43910 -atomic_t fscache_n_retrievals_nobufs;
43911 -atomic_t fscache_n_retrievals_intr;
43912 -atomic_t fscache_n_retrievals_nomem;
43913 -atomic_t fscache_n_retrievals_object_dead;
43914 -atomic_t fscache_n_retrieval_ops;
43915 -atomic_t fscache_n_retrieval_op_waits;
43916 +atomic_unchecked_t fscache_n_retrievals;
43917 +atomic_unchecked_t fscache_n_retrievals_ok;
43918 +atomic_unchecked_t fscache_n_retrievals_wait;
43919 +atomic_unchecked_t fscache_n_retrievals_nodata;
43920 +atomic_unchecked_t fscache_n_retrievals_nobufs;
43921 +atomic_unchecked_t fscache_n_retrievals_intr;
43922 +atomic_unchecked_t fscache_n_retrievals_nomem;
43923 +atomic_unchecked_t fscache_n_retrievals_object_dead;
43924 +atomic_unchecked_t fscache_n_retrieval_ops;
43925 +atomic_unchecked_t fscache_n_retrieval_op_waits;
43926
43927 -atomic_t fscache_n_stores;
43928 -atomic_t fscache_n_stores_ok;
43929 -atomic_t fscache_n_stores_again;
43930 -atomic_t fscache_n_stores_nobufs;
43931 -atomic_t fscache_n_stores_oom;
43932 -atomic_t fscache_n_store_ops;
43933 -atomic_t fscache_n_store_calls;
43934 -atomic_t fscache_n_store_pages;
43935 -atomic_t fscache_n_store_radix_deletes;
43936 -atomic_t fscache_n_store_pages_over_limit;
43937 +atomic_unchecked_t fscache_n_stores;
43938 +atomic_unchecked_t fscache_n_stores_ok;
43939 +atomic_unchecked_t fscache_n_stores_again;
43940 +atomic_unchecked_t fscache_n_stores_nobufs;
43941 +atomic_unchecked_t fscache_n_stores_oom;
43942 +atomic_unchecked_t fscache_n_store_ops;
43943 +atomic_unchecked_t fscache_n_store_calls;
43944 +atomic_unchecked_t fscache_n_store_pages;
43945 +atomic_unchecked_t fscache_n_store_radix_deletes;
43946 +atomic_unchecked_t fscache_n_store_pages_over_limit;
43947
43948 -atomic_t fscache_n_store_vmscan_not_storing;
43949 -atomic_t fscache_n_store_vmscan_gone;
43950 -atomic_t fscache_n_store_vmscan_busy;
43951 -atomic_t fscache_n_store_vmscan_cancelled;
43952 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43953 +atomic_unchecked_t fscache_n_store_vmscan_gone;
43954 +atomic_unchecked_t fscache_n_store_vmscan_busy;
43955 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43956
43957 -atomic_t fscache_n_marks;
43958 -atomic_t fscache_n_uncaches;
43959 +atomic_unchecked_t fscache_n_marks;
43960 +atomic_unchecked_t fscache_n_uncaches;
43961
43962 -atomic_t fscache_n_acquires;
43963 -atomic_t fscache_n_acquires_null;
43964 -atomic_t fscache_n_acquires_no_cache;
43965 -atomic_t fscache_n_acquires_ok;
43966 -atomic_t fscache_n_acquires_nobufs;
43967 -atomic_t fscache_n_acquires_oom;
43968 +atomic_unchecked_t fscache_n_acquires;
43969 +atomic_unchecked_t fscache_n_acquires_null;
43970 +atomic_unchecked_t fscache_n_acquires_no_cache;
43971 +atomic_unchecked_t fscache_n_acquires_ok;
43972 +atomic_unchecked_t fscache_n_acquires_nobufs;
43973 +atomic_unchecked_t fscache_n_acquires_oom;
43974
43975 -atomic_t fscache_n_updates;
43976 -atomic_t fscache_n_updates_null;
43977 -atomic_t fscache_n_updates_run;
43978 +atomic_unchecked_t fscache_n_updates;
43979 +atomic_unchecked_t fscache_n_updates_null;
43980 +atomic_unchecked_t fscache_n_updates_run;
43981
43982 -atomic_t fscache_n_relinquishes;
43983 -atomic_t fscache_n_relinquishes_null;
43984 -atomic_t fscache_n_relinquishes_waitcrt;
43985 -atomic_t fscache_n_relinquishes_retire;
43986 +atomic_unchecked_t fscache_n_relinquishes;
43987 +atomic_unchecked_t fscache_n_relinquishes_null;
43988 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43989 +atomic_unchecked_t fscache_n_relinquishes_retire;
43990
43991 -atomic_t fscache_n_cookie_index;
43992 -atomic_t fscache_n_cookie_data;
43993 -atomic_t fscache_n_cookie_special;
43994 +atomic_unchecked_t fscache_n_cookie_index;
43995 +atomic_unchecked_t fscache_n_cookie_data;
43996 +atomic_unchecked_t fscache_n_cookie_special;
43997
43998 -atomic_t fscache_n_object_alloc;
43999 -atomic_t fscache_n_object_no_alloc;
44000 -atomic_t fscache_n_object_lookups;
44001 -atomic_t fscache_n_object_lookups_negative;
44002 -atomic_t fscache_n_object_lookups_positive;
44003 -atomic_t fscache_n_object_lookups_timed_out;
44004 -atomic_t fscache_n_object_created;
44005 -atomic_t fscache_n_object_avail;
44006 -atomic_t fscache_n_object_dead;
44007 +atomic_unchecked_t fscache_n_object_alloc;
44008 +atomic_unchecked_t fscache_n_object_no_alloc;
44009 +atomic_unchecked_t fscache_n_object_lookups;
44010 +atomic_unchecked_t fscache_n_object_lookups_negative;
44011 +atomic_unchecked_t fscache_n_object_lookups_positive;
44012 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
44013 +atomic_unchecked_t fscache_n_object_created;
44014 +atomic_unchecked_t fscache_n_object_avail;
44015 +atomic_unchecked_t fscache_n_object_dead;
44016
44017 -atomic_t fscache_n_checkaux_none;
44018 -atomic_t fscache_n_checkaux_okay;
44019 -atomic_t fscache_n_checkaux_update;
44020 -atomic_t fscache_n_checkaux_obsolete;
44021 +atomic_unchecked_t fscache_n_checkaux_none;
44022 +atomic_unchecked_t fscache_n_checkaux_okay;
44023 +atomic_unchecked_t fscache_n_checkaux_update;
44024 +atomic_unchecked_t fscache_n_checkaux_obsolete;
44025
44026 atomic_t fscache_n_cop_alloc_object;
44027 atomic_t fscache_n_cop_lookup_object;
44028 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
44029 seq_puts(m, "FS-Cache statistics\n");
44030
44031 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
44032 - atomic_read(&fscache_n_cookie_index),
44033 - atomic_read(&fscache_n_cookie_data),
44034 - atomic_read(&fscache_n_cookie_special));
44035 + atomic_read_unchecked(&fscache_n_cookie_index),
44036 + atomic_read_unchecked(&fscache_n_cookie_data),
44037 + atomic_read_unchecked(&fscache_n_cookie_special));
44038
44039 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
44040 - atomic_read(&fscache_n_object_alloc),
44041 - atomic_read(&fscache_n_object_no_alloc),
44042 - atomic_read(&fscache_n_object_avail),
44043 - atomic_read(&fscache_n_object_dead));
44044 + atomic_read_unchecked(&fscache_n_object_alloc),
44045 + atomic_read_unchecked(&fscache_n_object_no_alloc),
44046 + atomic_read_unchecked(&fscache_n_object_avail),
44047 + atomic_read_unchecked(&fscache_n_object_dead));
44048 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
44049 - atomic_read(&fscache_n_checkaux_none),
44050 - atomic_read(&fscache_n_checkaux_okay),
44051 - atomic_read(&fscache_n_checkaux_update),
44052 - atomic_read(&fscache_n_checkaux_obsolete));
44053 + atomic_read_unchecked(&fscache_n_checkaux_none),
44054 + atomic_read_unchecked(&fscache_n_checkaux_okay),
44055 + atomic_read_unchecked(&fscache_n_checkaux_update),
44056 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
44057
44058 seq_printf(m, "Pages : mrk=%u unc=%u\n",
44059 - atomic_read(&fscache_n_marks),
44060 - atomic_read(&fscache_n_uncaches));
44061 + atomic_read_unchecked(&fscache_n_marks),
44062 + atomic_read_unchecked(&fscache_n_uncaches));
44063
44064 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
44065 " oom=%u\n",
44066 - atomic_read(&fscache_n_acquires),
44067 - atomic_read(&fscache_n_acquires_null),
44068 - atomic_read(&fscache_n_acquires_no_cache),
44069 - atomic_read(&fscache_n_acquires_ok),
44070 - atomic_read(&fscache_n_acquires_nobufs),
44071 - atomic_read(&fscache_n_acquires_oom));
44072 + atomic_read_unchecked(&fscache_n_acquires),
44073 + atomic_read_unchecked(&fscache_n_acquires_null),
44074 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
44075 + atomic_read_unchecked(&fscache_n_acquires_ok),
44076 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
44077 + atomic_read_unchecked(&fscache_n_acquires_oom));
44078
44079 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
44080 - atomic_read(&fscache_n_object_lookups),
44081 - atomic_read(&fscache_n_object_lookups_negative),
44082 - atomic_read(&fscache_n_object_lookups_positive),
44083 - atomic_read(&fscache_n_object_created),
44084 - atomic_read(&fscache_n_object_lookups_timed_out));
44085 + atomic_read_unchecked(&fscache_n_object_lookups),
44086 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
44087 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
44088 + atomic_read_unchecked(&fscache_n_object_created),
44089 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
44090
44091 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
44092 - atomic_read(&fscache_n_updates),
44093 - atomic_read(&fscache_n_updates_null),
44094 - atomic_read(&fscache_n_updates_run));
44095 + atomic_read_unchecked(&fscache_n_updates),
44096 + atomic_read_unchecked(&fscache_n_updates_null),
44097 + atomic_read_unchecked(&fscache_n_updates_run));
44098
44099 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
44100 - atomic_read(&fscache_n_relinquishes),
44101 - atomic_read(&fscache_n_relinquishes_null),
44102 - atomic_read(&fscache_n_relinquishes_waitcrt),
44103 - atomic_read(&fscache_n_relinquishes_retire));
44104 + atomic_read_unchecked(&fscache_n_relinquishes),
44105 + atomic_read_unchecked(&fscache_n_relinquishes_null),
44106 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
44107 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
44108
44109 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
44110 - atomic_read(&fscache_n_attr_changed),
44111 - atomic_read(&fscache_n_attr_changed_ok),
44112 - atomic_read(&fscache_n_attr_changed_nobufs),
44113 - atomic_read(&fscache_n_attr_changed_nomem),
44114 - atomic_read(&fscache_n_attr_changed_calls));
44115 + atomic_read_unchecked(&fscache_n_attr_changed),
44116 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
44117 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
44118 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
44119 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
44120
44121 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
44122 - atomic_read(&fscache_n_allocs),
44123 - atomic_read(&fscache_n_allocs_ok),
44124 - atomic_read(&fscache_n_allocs_wait),
44125 - atomic_read(&fscache_n_allocs_nobufs),
44126 - atomic_read(&fscache_n_allocs_intr));
44127 + atomic_read_unchecked(&fscache_n_allocs),
44128 + atomic_read_unchecked(&fscache_n_allocs_ok),
44129 + atomic_read_unchecked(&fscache_n_allocs_wait),
44130 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
44131 + atomic_read_unchecked(&fscache_n_allocs_intr));
44132 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
44133 - atomic_read(&fscache_n_alloc_ops),
44134 - atomic_read(&fscache_n_alloc_op_waits),
44135 - atomic_read(&fscache_n_allocs_object_dead));
44136 + atomic_read_unchecked(&fscache_n_alloc_ops),
44137 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
44138 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
44139
44140 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
44141 " int=%u oom=%u\n",
44142 - atomic_read(&fscache_n_retrievals),
44143 - atomic_read(&fscache_n_retrievals_ok),
44144 - atomic_read(&fscache_n_retrievals_wait),
44145 - atomic_read(&fscache_n_retrievals_nodata),
44146 - atomic_read(&fscache_n_retrievals_nobufs),
44147 - atomic_read(&fscache_n_retrievals_intr),
44148 - atomic_read(&fscache_n_retrievals_nomem));
44149 + atomic_read_unchecked(&fscache_n_retrievals),
44150 + atomic_read_unchecked(&fscache_n_retrievals_ok),
44151 + atomic_read_unchecked(&fscache_n_retrievals_wait),
44152 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
44153 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
44154 + atomic_read_unchecked(&fscache_n_retrievals_intr),
44155 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
44156 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
44157 - atomic_read(&fscache_n_retrieval_ops),
44158 - atomic_read(&fscache_n_retrieval_op_waits),
44159 - atomic_read(&fscache_n_retrievals_object_dead));
44160 + atomic_read_unchecked(&fscache_n_retrieval_ops),
44161 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
44162 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
44163
44164 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
44165 - atomic_read(&fscache_n_stores),
44166 - atomic_read(&fscache_n_stores_ok),
44167 - atomic_read(&fscache_n_stores_again),
44168 - atomic_read(&fscache_n_stores_nobufs),
44169 - atomic_read(&fscache_n_stores_oom));
44170 + atomic_read_unchecked(&fscache_n_stores),
44171 + atomic_read_unchecked(&fscache_n_stores_ok),
44172 + atomic_read_unchecked(&fscache_n_stores_again),
44173 + atomic_read_unchecked(&fscache_n_stores_nobufs),
44174 + atomic_read_unchecked(&fscache_n_stores_oom));
44175 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
44176 - atomic_read(&fscache_n_store_ops),
44177 - atomic_read(&fscache_n_store_calls),
44178 - atomic_read(&fscache_n_store_pages),
44179 - atomic_read(&fscache_n_store_radix_deletes),
44180 - atomic_read(&fscache_n_store_pages_over_limit));
44181 + atomic_read_unchecked(&fscache_n_store_ops),
44182 + atomic_read_unchecked(&fscache_n_store_calls),
44183 + atomic_read_unchecked(&fscache_n_store_pages),
44184 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
44185 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
44186
44187 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
44188 - atomic_read(&fscache_n_store_vmscan_not_storing),
44189 - atomic_read(&fscache_n_store_vmscan_gone),
44190 - atomic_read(&fscache_n_store_vmscan_busy),
44191 - atomic_read(&fscache_n_store_vmscan_cancelled));
44192 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
44193 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
44194 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
44195 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
44196
44197 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
44198 - atomic_read(&fscache_n_op_pend),
44199 - atomic_read(&fscache_n_op_run),
44200 - atomic_read(&fscache_n_op_enqueue),
44201 - atomic_read(&fscache_n_op_cancelled),
44202 - atomic_read(&fscache_n_op_rejected));
44203 + atomic_read_unchecked(&fscache_n_op_pend),
44204 + atomic_read_unchecked(&fscache_n_op_run),
44205 + atomic_read_unchecked(&fscache_n_op_enqueue),
44206 + atomic_read_unchecked(&fscache_n_op_cancelled),
44207 + atomic_read_unchecked(&fscache_n_op_rejected));
44208 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
44209 - atomic_read(&fscache_n_op_deferred_release),
44210 - atomic_read(&fscache_n_op_release),
44211 - atomic_read(&fscache_n_op_gc));
44212 + atomic_read_unchecked(&fscache_n_op_deferred_release),
44213 + atomic_read_unchecked(&fscache_n_op_release),
44214 + atomic_read_unchecked(&fscache_n_op_gc));
44215
44216 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
44217 atomic_read(&fscache_n_cop_alloc_object),
44218 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
44219 index 3426521..3b75162 100644
44220 --- a/fs/fuse/cuse.c
44221 +++ b/fs/fuse/cuse.c
44222 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
44223 INIT_LIST_HEAD(&cuse_conntbl[i]);
44224
44225 /* inherit and extend fuse_dev_operations */
44226 - cuse_channel_fops = fuse_dev_operations;
44227 - cuse_channel_fops.owner = THIS_MODULE;
44228 - cuse_channel_fops.open = cuse_channel_open;
44229 - cuse_channel_fops.release = cuse_channel_release;
44230 + pax_open_kernel();
44231 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
44232 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
44233 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
44234 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
44235 + pax_close_kernel();
44236
44237 cuse_class = class_create(THIS_MODULE, "cuse");
44238 if (IS_ERR(cuse_class))
44239 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
44240 index 2aaf3ea..8e50863 100644
44241 --- a/fs/fuse/dev.c
44242 +++ b/fs/fuse/dev.c
44243 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
44244 ret = 0;
44245 pipe_lock(pipe);
44246
44247 - if (!pipe->readers) {
44248 + if (!atomic_read(&pipe->readers)) {
44249 send_sig(SIGPIPE, current, 0);
44250 if (!ret)
44251 ret = -EPIPE;
44252 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
44253 index 9f63e49..d8a64c0 100644
44254 --- a/fs/fuse/dir.c
44255 +++ b/fs/fuse/dir.c
44256 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
44257 return link;
44258 }
44259
44260 -static void free_link(char *link)
44261 +static void free_link(const char *link)
44262 {
44263 if (!IS_ERR(link))
44264 free_page((unsigned long) link);
44265 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
44266 index cfd4959..a780959 100644
44267 --- a/fs/gfs2/inode.c
44268 +++ b/fs/gfs2/inode.c
44269 @@ -1490,7 +1490,7 @@ out:
44270
44271 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44272 {
44273 - char *s = nd_get_link(nd);
44274 + const char *s = nd_get_link(nd);
44275 if (!IS_ERR(s))
44276 kfree(s);
44277 }
44278 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
44279 index 0be5a78..9cfb853 100644
44280 --- a/fs/hugetlbfs/inode.c
44281 +++ b/fs/hugetlbfs/inode.c
44282 @@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
44283 .kill_sb = kill_litter_super,
44284 };
44285
44286 -static struct vfsmount *hugetlbfs_vfsmount;
44287 +struct vfsmount *hugetlbfs_vfsmount;
44288
44289 static int can_do_hugetlb_shm(void)
44290 {
44291 diff --git a/fs/inode.c b/fs/inode.c
44292 index ee4e66b..0451521 100644
44293 --- a/fs/inode.c
44294 +++ b/fs/inode.c
44295 @@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
44296
44297 #ifdef CONFIG_SMP
44298 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
44299 - static atomic_t shared_last_ino;
44300 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
44301 + static atomic_unchecked_t shared_last_ino;
44302 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
44303
44304 res = next - LAST_INO_BATCH;
44305 }
44306 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
44307 index e513f19..2ab1351 100644
44308 --- a/fs/jffs2/erase.c
44309 +++ b/fs/jffs2/erase.c
44310 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
44311 struct jffs2_unknown_node marker = {
44312 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44313 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44314 - .totlen = cpu_to_je32(c->cleanmarker_size)
44315 + .totlen = cpu_to_je32(c->cleanmarker_size),
44316 + .hdr_crc = cpu_to_je32(0)
44317 };
44318
44319 jffs2_prealloc_raw_node_refs(c, jeb, 1);
44320 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
44321 index b09e51d..e482afa 100644
44322 --- a/fs/jffs2/wbuf.c
44323 +++ b/fs/jffs2/wbuf.c
44324 @@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
44325 {
44326 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44327 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44328 - .totlen = constant_cpu_to_je32(8)
44329 + .totlen = constant_cpu_to_je32(8),
44330 + .hdr_crc = constant_cpu_to_je32(0)
44331 };
44332
44333 /*
44334 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
44335 index a44eff0..462e07d 100644
44336 --- a/fs/jfs/super.c
44337 +++ b/fs/jfs/super.c
44338 @@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
44339
44340 jfs_inode_cachep =
44341 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
44342 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
44343 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
44344 init_once);
44345 if (jfs_inode_cachep == NULL)
44346 return -ENOMEM;
44347 diff --git a/fs/libfs.c b/fs/libfs.c
44348 index f6d411e..e82a08d 100644
44349 --- a/fs/libfs.c
44350 +++ b/fs/libfs.c
44351 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44352
44353 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
44354 struct dentry *next;
44355 + char d_name[sizeof(next->d_iname)];
44356 + const unsigned char *name;
44357 +
44358 next = list_entry(p, struct dentry, d_u.d_child);
44359 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
44360 if (!simple_positive(next)) {
44361 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44362
44363 spin_unlock(&next->d_lock);
44364 spin_unlock(&dentry->d_lock);
44365 - if (filldir(dirent, next->d_name.name,
44366 + name = next->d_name.name;
44367 + if (name == next->d_iname) {
44368 + memcpy(d_name, name, next->d_name.len);
44369 + name = d_name;
44370 + }
44371 + if (filldir(dirent, name,
44372 next->d_name.len, filp->f_pos,
44373 next->d_inode->i_ino,
44374 dt_type(next->d_inode)) < 0)
44375 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
44376 index 8392cb8..80d6193 100644
44377 --- a/fs/lockd/clntproc.c
44378 +++ b/fs/lockd/clntproc.c
44379 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
44380 /*
44381 * Cookie counter for NLM requests
44382 */
44383 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44384 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44385
44386 void nlmclnt_next_cookie(struct nlm_cookie *c)
44387 {
44388 - u32 cookie = atomic_inc_return(&nlm_cookie);
44389 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44390
44391 memcpy(c->data, &cookie, 4);
44392 c->len=4;
44393 diff --git a/fs/locks.c b/fs/locks.c
44394 index 637694b..f84a121 100644
44395 --- a/fs/locks.c
44396 +++ b/fs/locks.c
44397 @@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
44398 return;
44399
44400 if (filp->f_op && filp->f_op->flock) {
44401 - struct file_lock fl = {
44402 + struct file_lock flock = {
44403 .fl_pid = current->tgid,
44404 .fl_file = filp,
44405 .fl_flags = FL_FLOCK,
44406 .fl_type = F_UNLCK,
44407 .fl_end = OFFSET_MAX,
44408 };
44409 - filp->f_op->flock(filp, F_SETLKW, &fl);
44410 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
44411 - fl.fl_ops->fl_release_private(&fl);
44412 + filp->f_op->flock(filp, F_SETLKW, &flock);
44413 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
44414 + flock.fl_ops->fl_release_private(&flock);
44415 }
44416
44417 lock_flocks();
44418 diff --git a/fs/namei.c b/fs/namei.c
44419 index 5008f01..90328a7 100644
44420 --- a/fs/namei.c
44421 +++ b/fs/namei.c
44422 @@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
44423 if (ret != -EACCES)
44424 return ret;
44425
44426 +#ifdef CONFIG_GRKERNSEC
44427 + /* we'll block if we have to log due to a denied capability use */
44428 + if (mask & MAY_NOT_BLOCK)
44429 + return -ECHILD;
44430 +#endif
44431 +
44432 if (S_ISDIR(inode->i_mode)) {
44433 /* DACs are overridable for directories */
44434 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44435 - return 0;
44436 if (!(mask & MAY_WRITE))
44437 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44438 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44439 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44440 return 0;
44441 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44442 + return 0;
44443 return -EACCES;
44444 }
44445 /*
44446 + * Searching includes executable on directories, else just read.
44447 + */
44448 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44449 + if (mask == MAY_READ)
44450 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44451 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44452 + return 0;
44453 +
44454 + /*
44455 * Read/write DACs are always overridable.
44456 * Executable DACs are overridable when there is
44457 * at least one exec bit set.
44458 @@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
44459 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44460 return 0;
44461
44462 - /*
44463 - * Searching includes executable on directories, else just read.
44464 - */
44465 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44466 - if (mask == MAY_READ)
44467 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44468 - return 0;
44469 -
44470 return -EACCES;
44471 }
44472
44473 @@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
44474 return error;
44475 }
44476
44477 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
44478 + dentry->d_inode, dentry, nd->path.mnt)) {
44479 + error = -EACCES;
44480 + *p = ERR_PTR(error); /* no ->put_link(), please */
44481 + path_put(&nd->path);
44482 + return error;
44483 + }
44484 +
44485 nd->last_type = LAST_BIND;
44486 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44487 error = PTR_ERR(*p);
44488 if (!IS_ERR(*p)) {
44489 - char *s = nd_get_link(nd);
44490 + const char *s = nd_get_link(nd);
44491 error = 0;
44492 if (s)
44493 error = __vfs_follow_link(nd, s);
44494 @@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
44495 if (!err)
44496 err = complete_walk(nd);
44497
44498 + if (!(nd->flags & LOOKUP_PARENT)) {
44499 +#ifdef CONFIG_GRKERNSEC
44500 + if (flags & LOOKUP_RCU) {
44501 + if (!err)
44502 + path_put(&nd->path);
44503 + err = -ECHILD;
44504 + } else
44505 +#endif
44506 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44507 + if (!err)
44508 + path_put(&nd->path);
44509 + err = -ENOENT;
44510 + }
44511 + }
44512 +
44513 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44514 if (!nd->inode->i_op->lookup) {
44515 path_put(&nd->path);
44516 @@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
44517 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44518
44519 if (likely(!retval)) {
44520 + if (*name != '/' && nd->path.dentry && nd->inode) {
44521 +#ifdef CONFIG_GRKERNSEC
44522 + if (flags & LOOKUP_RCU)
44523 + return -ECHILD;
44524 +#endif
44525 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44526 + return -ENOENT;
44527 + }
44528 +
44529 if (unlikely(!audit_dummy_context())) {
44530 if (nd->path.dentry && nd->inode)
44531 audit_inode(name, nd->path.dentry);
44532 @@ -2046,6 +2086,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
44533 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
44534 return -EPERM;
44535
44536 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
44537 + return -EPERM;
44538 + if (gr_handle_rawio(inode))
44539 + return -EPERM;
44540 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
44541 + return -EACCES;
44542 +
44543 return 0;
44544 }
44545
44546 @@ -2107,6 +2154,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44547 error = complete_walk(nd);
44548 if (error)
44549 return ERR_PTR(error);
44550 +#ifdef CONFIG_GRKERNSEC
44551 + if (nd->flags & LOOKUP_RCU) {
44552 + error = -ECHILD;
44553 + goto exit;
44554 + }
44555 +#endif
44556 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44557 + error = -ENOENT;
44558 + goto exit;
44559 + }
44560 audit_inode(pathname, nd->path.dentry);
44561 if (open_flag & O_CREAT) {
44562 error = -EISDIR;
44563 @@ -2117,6 +2174,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44564 error = complete_walk(nd);
44565 if (error)
44566 return ERR_PTR(error);
44567 +#ifdef CONFIG_GRKERNSEC
44568 + if (nd->flags & LOOKUP_RCU) {
44569 + error = -ECHILD;
44570 + goto exit;
44571 + }
44572 +#endif
44573 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44574 + error = -ENOENT;
44575 + goto exit;
44576 + }
44577 audit_inode(pathname, dir);
44578 goto ok;
44579 }
44580 @@ -2138,6 +2205,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44581 error = complete_walk(nd);
44582 if (error)
44583 return ERR_PTR(-ECHILD);
44584 +#ifdef CONFIG_GRKERNSEC
44585 + if (nd->flags & LOOKUP_RCU) {
44586 + error = -ECHILD;
44587 + goto exit;
44588 + }
44589 +#endif
44590 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44591 + error = -ENOENT;
44592 + goto exit;
44593 + }
44594
44595 error = -ENOTDIR;
44596 if (nd->flags & LOOKUP_DIRECTORY) {
44597 @@ -2178,6 +2255,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44598 /* Negative dentry, just create the file */
44599 if (!dentry->d_inode) {
44600 int mode = op->mode;
44601 +
44602 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44603 + error = -EACCES;
44604 + goto exit_mutex_unlock;
44605 + }
44606 +
44607 if (!IS_POSIXACL(dir->d_inode))
44608 mode &= ~current_umask();
44609 /*
44610 @@ -2201,6 +2284,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44611 error = vfs_create(dir->d_inode, dentry, mode, nd);
44612 if (error)
44613 goto exit_mutex_unlock;
44614 + else
44615 + gr_handle_create(path->dentry, path->mnt);
44616 mutex_unlock(&dir->d_inode->i_mutex);
44617 dput(nd->path.dentry);
44618 nd->path.dentry = dentry;
44619 @@ -2210,6 +2295,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44620 /*
44621 * It already exists.
44622 */
44623 +
44624 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44625 + error = -ENOENT;
44626 + goto exit_mutex_unlock;
44627 + }
44628 +
44629 + /* only check if O_CREAT is specified, all other checks need to go
44630 + into may_open */
44631 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44632 + error = -EACCES;
44633 + goto exit_mutex_unlock;
44634 + }
44635 +
44636 mutex_unlock(&dir->d_inode->i_mutex);
44637 audit_inode(pathname, path->dentry);
44638
44639 @@ -2422,6 +2520,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
44640 *path = nd.path;
44641 return dentry;
44642 eexist:
44643 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44644 + dput(dentry);
44645 + dentry = ERR_PTR(-ENOENT);
44646 + goto fail;
44647 + }
44648 dput(dentry);
44649 dentry = ERR_PTR(-EEXIST);
44650 fail:
44651 @@ -2444,6 +2547,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
44652 }
44653 EXPORT_SYMBOL(user_path_create);
44654
44655 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44656 +{
44657 + char *tmp = getname(pathname);
44658 + struct dentry *res;
44659 + if (IS_ERR(tmp))
44660 + return ERR_CAST(tmp);
44661 + res = kern_path_create(dfd, tmp, path, is_dir);
44662 + if (IS_ERR(res))
44663 + putname(tmp);
44664 + else
44665 + *to = tmp;
44666 + return res;
44667 +}
44668 +
44669 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44670 {
44671 int error = may_create(dir, dentry);
44672 @@ -2511,6 +2628,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44673 error = mnt_want_write(path.mnt);
44674 if (error)
44675 goto out_dput;
44676 +
44677 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44678 + error = -EPERM;
44679 + goto out_drop_write;
44680 + }
44681 +
44682 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44683 + error = -EACCES;
44684 + goto out_drop_write;
44685 + }
44686 +
44687 error = security_path_mknod(&path, dentry, mode, dev);
44688 if (error)
44689 goto out_drop_write;
44690 @@ -2528,6 +2656,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44691 }
44692 out_drop_write:
44693 mnt_drop_write(path.mnt);
44694 +
44695 + if (!error)
44696 + gr_handle_create(dentry, path.mnt);
44697 out_dput:
44698 dput(dentry);
44699 mutex_unlock(&path.dentry->d_inode->i_mutex);
44700 @@ -2577,12 +2708,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
44701 error = mnt_want_write(path.mnt);
44702 if (error)
44703 goto out_dput;
44704 +
44705 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44706 + error = -EACCES;
44707 + goto out_drop_write;
44708 + }
44709 +
44710 error = security_path_mkdir(&path, dentry, mode);
44711 if (error)
44712 goto out_drop_write;
44713 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44714 out_drop_write:
44715 mnt_drop_write(path.mnt);
44716 +
44717 + if (!error)
44718 + gr_handle_create(dentry, path.mnt);
44719 out_dput:
44720 dput(dentry);
44721 mutex_unlock(&path.dentry->d_inode->i_mutex);
44722 @@ -2662,6 +2802,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44723 char * name;
44724 struct dentry *dentry;
44725 struct nameidata nd;
44726 + ino_t saved_ino = 0;
44727 + dev_t saved_dev = 0;
44728
44729 error = user_path_parent(dfd, pathname, &nd, &name);
44730 if (error)
44731 @@ -2690,6 +2832,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
44732 error = -ENOENT;
44733 goto exit3;
44734 }
44735 +
44736 + saved_ino = dentry->d_inode->i_ino;
44737 + saved_dev = gr_get_dev_from_dentry(dentry);
44738 +
44739 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44740 + error = -EACCES;
44741 + goto exit3;
44742 + }
44743 +
44744 error = mnt_want_write(nd.path.mnt);
44745 if (error)
44746 goto exit3;
44747 @@ -2697,6 +2848,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44748 if (error)
44749 goto exit4;
44750 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44751 + if (!error && (saved_dev || saved_ino))
44752 + gr_handle_delete(saved_ino, saved_dev);
44753 exit4:
44754 mnt_drop_write(nd.path.mnt);
44755 exit3:
44756 @@ -2759,6 +2912,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44757 struct dentry *dentry;
44758 struct nameidata nd;
44759 struct inode *inode = NULL;
44760 + ino_t saved_ino = 0;
44761 + dev_t saved_dev = 0;
44762
44763 error = user_path_parent(dfd, pathname, &nd, &name);
44764 if (error)
44765 @@ -2781,6 +2936,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44766 if (!inode)
44767 goto slashes;
44768 ihold(inode);
44769 +
44770 + if (inode->i_nlink <= 1) {
44771 + saved_ino = inode->i_ino;
44772 + saved_dev = gr_get_dev_from_dentry(dentry);
44773 + }
44774 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44775 + error = -EACCES;
44776 + goto exit2;
44777 + }
44778 +
44779 error = mnt_want_write(nd.path.mnt);
44780 if (error)
44781 goto exit2;
44782 @@ -2788,6 +2953,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44783 if (error)
44784 goto exit3;
44785 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44786 + if (!error && (saved_ino || saved_dev))
44787 + gr_handle_delete(saved_ino, saved_dev);
44788 exit3:
44789 mnt_drop_write(nd.path.mnt);
44790 exit2:
44791 @@ -2863,10 +3030,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
44792 error = mnt_want_write(path.mnt);
44793 if (error)
44794 goto out_dput;
44795 +
44796 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44797 + error = -EACCES;
44798 + goto out_drop_write;
44799 + }
44800 +
44801 error = security_path_symlink(&path, dentry, from);
44802 if (error)
44803 goto out_drop_write;
44804 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44805 + if (!error)
44806 + gr_handle_create(dentry, path.mnt);
44807 out_drop_write:
44808 mnt_drop_write(path.mnt);
44809 out_dput:
44810 @@ -2938,6 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44811 {
44812 struct dentry *new_dentry;
44813 struct path old_path, new_path;
44814 + char *to = NULL;
44815 int how = 0;
44816 int error;
44817
44818 @@ -2961,7 +3137,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44819 if (error)
44820 return error;
44821
44822 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44823 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44824 error = PTR_ERR(new_dentry);
44825 if (IS_ERR(new_dentry))
44826 goto out;
44827 @@ -2972,13 +3148,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44828 error = mnt_want_write(new_path.mnt);
44829 if (error)
44830 goto out_dput;
44831 +
44832 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44833 + old_path.dentry->d_inode,
44834 + old_path.dentry->d_inode->i_mode, to)) {
44835 + error = -EACCES;
44836 + goto out_drop_write;
44837 + }
44838 +
44839 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44840 + old_path.dentry, old_path.mnt, to)) {
44841 + error = -EACCES;
44842 + goto out_drop_write;
44843 + }
44844 +
44845 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44846 if (error)
44847 goto out_drop_write;
44848 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44849 + if (!error)
44850 + gr_handle_create(new_dentry, new_path.mnt);
44851 out_drop_write:
44852 mnt_drop_write(new_path.mnt);
44853 out_dput:
44854 + putname(to);
44855 dput(new_dentry);
44856 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44857 path_put(&new_path);
44858 @@ -3206,6 +3399,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44859 if (new_dentry == trap)
44860 goto exit5;
44861
44862 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44863 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
44864 + to);
44865 + if (error)
44866 + goto exit5;
44867 +
44868 error = mnt_want_write(oldnd.path.mnt);
44869 if (error)
44870 goto exit5;
44871 @@ -3215,6 +3414,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44872 goto exit6;
44873 error = vfs_rename(old_dir->d_inode, old_dentry,
44874 new_dir->d_inode, new_dentry);
44875 + if (!error)
44876 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44877 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44878 exit6:
44879 mnt_drop_write(oldnd.path.mnt);
44880 exit5:
44881 @@ -3240,6 +3442,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
44882
44883 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44884 {
44885 + char tmpbuf[64];
44886 + const char *newlink;
44887 int len;
44888
44889 len = PTR_ERR(link);
44890 @@ -3249,7 +3453,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
44891 len = strlen(link);
44892 if (len > (unsigned) buflen)
44893 len = buflen;
44894 - if (copy_to_user(buffer, link, len))
44895 +
44896 + if (len < sizeof(tmpbuf)) {
44897 + memcpy(tmpbuf, link, len);
44898 + newlink = tmpbuf;
44899 + } else
44900 + newlink = link;
44901 +
44902 + if (copy_to_user(buffer, newlink, len))
44903 len = -EFAULT;
44904 out:
44905 return len;
44906 diff --git a/fs/namespace.c b/fs/namespace.c
44907 index cfc6d44..b4632a5 100644
44908 --- a/fs/namespace.c
44909 +++ b/fs/namespace.c
44910 @@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44911 if (!(sb->s_flags & MS_RDONLY))
44912 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44913 up_write(&sb->s_umount);
44914 +
44915 + gr_log_remount(mnt->mnt_devname, retval);
44916 +
44917 return retval;
44918 }
44919
44920 @@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44921 br_write_unlock(vfsmount_lock);
44922 up_write(&namespace_sem);
44923 release_mounts(&umount_list);
44924 +
44925 + gr_log_unmount(mnt->mnt_devname, retval);
44926 +
44927 return retval;
44928 }
44929
44930 @@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44931 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44932 MS_STRICTATIME);
44933
44934 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44935 + retval = -EPERM;
44936 + goto dput_out;
44937 + }
44938 +
44939 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44940 + retval = -EPERM;
44941 + goto dput_out;
44942 + }
44943 +
44944 if (flags & MS_REMOUNT)
44945 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44946 data_page);
44947 @@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44948 dev_name, data_page);
44949 dput_out:
44950 path_put(&path);
44951 +
44952 + gr_log_mount(dev_name, dir_name, retval);
44953 +
44954 return retval;
44955 }
44956
44957 @@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
44958 if (error)
44959 goto out2;
44960
44961 + if (gr_handle_chroot_pivot()) {
44962 + error = -EPERM;
44963 + goto out2;
44964 + }
44965 +
44966 get_fs_root(current->fs, &root);
44967 error = lock_mount(&old);
44968 if (error)
44969 diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
44970 index 3db6b82..a57597e 100644
44971 --- a/fs/nfs/blocklayout/blocklayout.c
44972 +++ b/fs/nfs/blocklayout/blocklayout.c
44973 @@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
44974 */
44975 struct parallel_io {
44976 struct kref refcnt;
44977 - struct rpc_call_ops call_ops;
44978 + rpc_call_ops_no_const call_ops;
44979 void (*pnfs_callback) (void *data);
44980 void *data;
44981 };
44982 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
44983 index 50a15fa..ca113f9 100644
44984 --- a/fs/nfs/inode.c
44985 +++ b/fs/nfs/inode.c
44986 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
44987 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44988 nfsi->attrtimeo_timestamp = jiffies;
44989
44990 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44991 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44992 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44993 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44994 else
44995 @@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
44996 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44997 }
44998
44999 -static atomic_long_t nfs_attr_generation_counter;
45000 +static atomic_long_unchecked_t nfs_attr_generation_counter;
45001
45002 static unsigned long nfs_read_attr_generation_counter(void)
45003 {
45004 - return atomic_long_read(&nfs_attr_generation_counter);
45005 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
45006 }
45007
45008 unsigned long nfs_inc_attr_generation_counter(void)
45009 {
45010 - return atomic_long_inc_return(&nfs_attr_generation_counter);
45011 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
45012 }
45013
45014 void nfs_fattr_init(struct nfs_fattr *fattr)
45015 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
45016 index 7a2e442..8e544cc 100644
45017 --- a/fs/nfsd/vfs.c
45018 +++ b/fs/nfsd/vfs.c
45019 @@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
45020 } else {
45021 oldfs = get_fs();
45022 set_fs(KERNEL_DS);
45023 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
45024 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
45025 set_fs(oldfs);
45026 }
45027
45028 @@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
45029
45030 /* Write the data. */
45031 oldfs = get_fs(); set_fs(KERNEL_DS);
45032 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
45033 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
45034 set_fs(oldfs);
45035 if (host_err < 0)
45036 goto out_nfserr;
45037 @@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
45038 */
45039
45040 oldfs = get_fs(); set_fs(KERNEL_DS);
45041 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
45042 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
45043 set_fs(oldfs);
45044
45045 if (host_err < 0)
45046 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
45047 index 9fde1c0..14e8827 100644
45048 --- a/fs/notify/fanotify/fanotify_user.c
45049 +++ b/fs/notify/fanotify/fanotify_user.c
45050 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
45051 goto out_close_fd;
45052
45053 ret = -EFAULT;
45054 - if (copy_to_user(buf, &fanotify_event_metadata,
45055 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
45056 + copy_to_user(buf, &fanotify_event_metadata,
45057 fanotify_event_metadata.event_len))
45058 goto out_kill_access_response;
45059
45060 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
45061 index ee18815..7aa5d01 100644
45062 --- a/fs/notify/notification.c
45063 +++ b/fs/notify/notification.c
45064 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
45065 * get set to 0 so it will never get 'freed'
45066 */
45067 static struct fsnotify_event *q_overflow_event;
45068 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45069 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45070
45071 /**
45072 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
45073 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45074 */
45075 u32 fsnotify_get_cookie(void)
45076 {
45077 - return atomic_inc_return(&fsnotify_sync_cookie);
45078 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
45079 }
45080 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
45081
45082 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
45083 index 99e3610..02c1068 100644
45084 --- a/fs/ntfs/dir.c
45085 +++ b/fs/ntfs/dir.c
45086 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
45087 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
45088 ~(s64)(ndir->itype.index.block_size - 1)));
45089 /* Bounds checks. */
45090 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45091 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45092 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
45093 "inode 0x%lx or driver bug.", vdir->i_ino);
45094 goto err_out;
45095 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
45096 index c587e2d..3641eaa 100644
45097 --- a/fs/ntfs/file.c
45098 +++ b/fs/ntfs/file.c
45099 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
45100 #endif /* NTFS_RW */
45101 };
45102
45103 -const struct file_operations ntfs_empty_file_ops = {};
45104 +const struct file_operations ntfs_empty_file_ops __read_only;
45105
45106 -const struct inode_operations ntfs_empty_inode_ops = {};
45107 +const struct inode_operations ntfs_empty_inode_ops __read_only;
45108 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
45109 index 210c352..a174f83 100644
45110 --- a/fs/ocfs2/localalloc.c
45111 +++ b/fs/ocfs2/localalloc.c
45112 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
45113 goto bail;
45114 }
45115
45116 - atomic_inc(&osb->alloc_stats.moves);
45117 + atomic_inc_unchecked(&osb->alloc_stats.moves);
45118
45119 bail:
45120 if (handle)
45121 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
45122 index d355e6e..578d905 100644
45123 --- a/fs/ocfs2/ocfs2.h
45124 +++ b/fs/ocfs2/ocfs2.h
45125 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
45126
45127 struct ocfs2_alloc_stats
45128 {
45129 - atomic_t moves;
45130 - atomic_t local_data;
45131 - atomic_t bitmap_data;
45132 - atomic_t bg_allocs;
45133 - atomic_t bg_extends;
45134 + atomic_unchecked_t moves;
45135 + atomic_unchecked_t local_data;
45136 + atomic_unchecked_t bitmap_data;
45137 + atomic_unchecked_t bg_allocs;
45138 + atomic_unchecked_t bg_extends;
45139 };
45140
45141 enum ocfs2_local_alloc_state
45142 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
45143 index ba5d97e..c77db25 100644
45144 --- a/fs/ocfs2/suballoc.c
45145 +++ b/fs/ocfs2/suballoc.c
45146 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
45147 mlog_errno(status);
45148 goto bail;
45149 }
45150 - atomic_inc(&osb->alloc_stats.bg_extends);
45151 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
45152
45153 /* You should never ask for this much metadata */
45154 BUG_ON(bits_wanted >
45155 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
45156 mlog_errno(status);
45157 goto bail;
45158 }
45159 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45160 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45161
45162 *suballoc_loc = res.sr_bg_blkno;
45163 *suballoc_bit_start = res.sr_bit_offset;
45164 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
45165 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
45166 res->sr_bits);
45167
45168 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45169 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45170
45171 BUG_ON(res->sr_bits != 1);
45172
45173 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
45174 mlog_errno(status);
45175 goto bail;
45176 }
45177 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45178 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45179
45180 BUG_ON(res.sr_bits != 1);
45181
45182 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45183 cluster_start,
45184 num_clusters);
45185 if (!status)
45186 - atomic_inc(&osb->alloc_stats.local_data);
45187 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
45188 } else {
45189 if (min_clusters > (osb->bitmap_cpg - 1)) {
45190 /* The only paths asking for contiguousness
45191 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45192 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
45193 res.sr_bg_blkno,
45194 res.sr_bit_offset);
45195 - atomic_inc(&osb->alloc_stats.bitmap_data);
45196 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
45197 *num_clusters = res.sr_bits;
45198 }
45199 }
45200 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
45201 index 4994f8b..eaab8eb 100644
45202 --- a/fs/ocfs2/super.c
45203 +++ b/fs/ocfs2/super.c
45204 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
45205 "%10s => GlobalAllocs: %d LocalAllocs: %d "
45206 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
45207 "Stats",
45208 - atomic_read(&osb->alloc_stats.bitmap_data),
45209 - atomic_read(&osb->alloc_stats.local_data),
45210 - atomic_read(&osb->alloc_stats.bg_allocs),
45211 - atomic_read(&osb->alloc_stats.moves),
45212 - atomic_read(&osb->alloc_stats.bg_extends));
45213 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
45214 + atomic_read_unchecked(&osb->alloc_stats.local_data),
45215 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
45216 + atomic_read_unchecked(&osb->alloc_stats.moves),
45217 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
45218
45219 out += snprintf(buf + out, len - out,
45220 "%10s => State: %u Descriptor: %llu Size: %u bits "
45221 @@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
45222 spin_lock_init(&osb->osb_xattr_lock);
45223 ocfs2_init_steal_slots(osb);
45224
45225 - atomic_set(&osb->alloc_stats.moves, 0);
45226 - atomic_set(&osb->alloc_stats.local_data, 0);
45227 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
45228 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
45229 - atomic_set(&osb->alloc_stats.bg_extends, 0);
45230 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
45231 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
45232 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
45233 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
45234 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
45235
45236 /* Copy the blockcheck stats from the superblock probe */
45237 osb->osb_ecc_stats = *stats;
45238 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
45239 index 5d22872..523db20 100644
45240 --- a/fs/ocfs2/symlink.c
45241 +++ b/fs/ocfs2/symlink.c
45242 @@ -142,7 +142,7 @@ bail:
45243
45244 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
45245 {
45246 - char *link = nd_get_link(nd);
45247 + const char *link = nd_get_link(nd);
45248 if (!IS_ERR(link))
45249 kfree(link);
45250 }
45251 diff --git a/fs/open.c b/fs/open.c
45252 index 22c41b5..78894cf 100644
45253 --- a/fs/open.c
45254 +++ b/fs/open.c
45255 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
45256 error = locks_verify_truncate(inode, NULL, length);
45257 if (!error)
45258 error = security_path_truncate(&path);
45259 +
45260 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45261 + error = -EACCES;
45262 +
45263 if (!error)
45264 error = do_truncate(path.dentry, length, 0, NULL);
45265
45266 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
45267 if (__mnt_is_readonly(path.mnt))
45268 res = -EROFS;
45269
45270 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45271 + res = -EACCES;
45272 +
45273 out_path_release:
45274 path_put(&path);
45275 out:
45276 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
45277 if (error)
45278 goto dput_and_out;
45279
45280 + gr_log_chdir(path.dentry, path.mnt);
45281 +
45282 set_fs_pwd(current->fs, &path);
45283
45284 dput_and_out:
45285 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
45286 goto out_putf;
45287
45288 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
45289 +
45290 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45291 + error = -EPERM;
45292 +
45293 + if (!error)
45294 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45295 +
45296 if (!error)
45297 set_fs_pwd(current->fs, &file->f_path);
45298 out_putf:
45299 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
45300 if (error)
45301 goto dput_and_out;
45302
45303 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45304 + goto dput_and_out;
45305 +
45306 set_fs_root(current->fs, &path);
45307 +
45308 + gr_handle_chroot_chdir(&path);
45309 +
45310 error = 0;
45311 dput_and_out:
45312 path_put(&path);
45313 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
45314 if (error)
45315 return error;
45316 mutex_lock(&inode->i_mutex);
45317 +
45318 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
45319 + error = -EACCES;
45320 + goto out_unlock;
45321 + }
45322 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
45323 + error = -EACCES;
45324 + goto out_unlock;
45325 + }
45326 +
45327 error = security_path_chmod(path->dentry, path->mnt, mode);
45328 if (error)
45329 goto out_unlock;
45330 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
45331 int error;
45332 struct iattr newattrs;
45333
45334 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
45335 + return -EACCES;
45336 +
45337 newattrs.ia_valid = ATTR_CTIME;
45338 if (user != (uid_t) -1) {
45339 newattrs.ia_valid |= ATTR_UID;
45340 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
45341 index 6296b40..417c00f 100644
45342 --- a/fs/partitions/efi.c
45343 +++ b/fs/partitions/efi.c
45344 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
45345 if (!gpt)
45346 return NULL;
45347
45348 + if (!le32_to_cpu(gpt->num_partition_entries))
45349 + return NULL;
45350 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
45351 + if (!pte)
45352 + return NULL;
45353 +
45354 count = le32_to_cpu(gpt->num_partition_entries) *
45355 le32_to_cpu(gpt->sizeof_partition_entry);
45356 - if (!count)
45357 - return NULL;
45358 - pte = kzalloc(count, GFP_KERNEL);
45359 - if (!pte)
45360 - return NULL;
45361 -
45362 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
45363 (u8 *) pte,
45364 count) < count) {
45365 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
45366 index bd8ae78..539d250 100644
45367 --- a/fs/partitions/ldm.c
45368 +++ b/fs/partitions/ldm.c
45369 @@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
45370 goto found;
45371 }
45372
45373 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45374 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45375 if (!f) {
45376 ldm_crit ("Out of memory.");
45377 return false;
45378 diff --git a/fs/pipe.c b/fs/pipe.c
45379 index 4065f07..68c0706 100644
45380 --- a/fs/pipe.c
45381 +++ b/fs/pipe.c
45382 @@ -420,9 +420,9 @@ redo:
45383 }
45384 if (bufs) /* More to do? */
45385 continue;
45386 - if (!pipe->writers)
45387 + if (!atomic_read(&pipe->writers))
45388 break;
45389 - if (!pipe->waiting_writers) {
45390 + if (!atomic_read(&pipe->waiting_writers)) {
45391 /* syscall merging: Usually we must not sleep
45392 * if O_NONBLOCK is set, or if we got some data.
45393 * But if a writer sleeps in kernel space, then
45394 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
45395 mutex_lock(&inode->i_mutex);
45396 pipe = inode->i_pipe;
45397
45398 - if (!pipe->readers) {
45399 + if (!atomic_read(&pipe->readers)) {
45400 send_sig(SIGPIPE, current, 0);
45401 ret = -EPIPE;
45402 goto out;
45403 @@ -530,7 +530,7 @@ redo1:
45404 for (;;) {
45405 int bufs;
45406
45407 - if (!pipe->readers) {
45408 + if (!atomic_read(&pipe->readers)) {
45409 send_sig(SIGPIPE, current, 0);
45410 if (!ret)
45411 ret = -EPIPE;
45412 @@ -616,9 +616,9 @@ redo2:
45413 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45414 do_wakeup = 0;
45415 }
45416 - pipe->waiting_writers++;
45417 + atomic_inc(&pipe->waiting_writers);
45418 pipe_wait(pipe);
45419 - pipe->waiting_writers--;
45420 + atomic_dec(&pipe->waiting_writers);
45421 }
45422 out:
45423 mutex_unlock(&inode->i_mutex);
45424 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45425 mask = 0;
45426 if (filp->f_mode & FMODE_READ) {
45427 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45428 - if (!pipe->writers && filp->f_version != pipe->w_counter)
45429 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45430 mask |= POLLHUP;
45431 }
45432
45433 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45434 * Most Unices do not set POLLERR for FIFOs but on Linux they
45435 * behave exactly like pipes for poll().
45436 */
45437 - if (!pipe->readers)
45438 + if (!atomic_read(&pipe->readers))
45439 mask |= POLLERR;
45440 }
45441
45442 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
45443
45444 mutex_lock(&inode->i_mutex);
45445 pipe = inode->i_pipe;
45446 - pipe->readers -= decr;
45447 - pipe->writers -= decw;
45448 + atomic_sub(decr, &pipe->readers);
45449 + atomic_sub(decw, &pipe->writers);
45450
45451 - if (!pipe->readers && !pipe->writers) {
45452 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45453 free_pipe_info(inode);
45454 } else {
45455 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45456 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
45457
45458 if (inode->i_pipe) {
45459 ret = 0;
45460 - inode->i_pipe->readers++;
45461 + atomic_inc(&inode->i_pipe->readers);
45462 }
45463
45464 mutex_unlock(&inode->i_mutex);
45465 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
45466
45467 if (inode->i_pipe) {
45468 ret = 0;
45469 - inode->i_pipe->writers++;
45470 + atomic_inc(&inode->i_pipe->writers);
45471 }
45472
45473 mutex_unlock(&inode->i_mutex);
45474 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
45475 if (inode->i_pipe) {
45476 ret = 0;
45477 if (filp->f_mode & FMODE_READ)
45478 - inode->i_pipe->readers++;
45479 + atomic_inc(&inode->i_pipe->readers);
45480 if (filp->f_mode & FMODE_WRITE)
45481 - inode->i_pipe->writers++;
45482 + atomic_inc(&inode->i_pipe->writers);
45483 }
45484
45485 mutex_unlock(&inode->i_mutex);
45486 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45487 inode->i_pipe = NULL;
45488 }
45489
45490 -static struct vfsmount *pipe_mnt __read_mostly;
45491 +struct vfsmount *pipe_mnt __read_mostly;
45492
45493 /*
45494 * pipefs_dname() is called from d_path().
45495 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
45496 goto fail_iput;
45497 inode->i_pipe = pipe;
45498
45499 - pipe->readers = pipe->writers = 1;
45500 + atomic_set(&pipe->readers, 1);
45501 + atomic_set(&pipe->writers, 1);
45502 inode->i_fop = &rdwr_pipefifo_fops;
45503
45504 /*
45505 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
45506 index 15af622..0e9f4467 100644
45507 --- a/fs/proc/Kconfig
45508 +++ b/fs/proc/Kconfig
45509 @@ -30,12 +30,12 @@ config PROC_FS
45510
45511 config PROC_KCORE
45512 bool "/proc/kcore support" if !ARM
45513 - depends on PROC_FS && MMU
45514 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45515
45516 config PROC_VMCORE
45517 bool "/proc/vmcore support"
45518 - depends on PROC_FS && CRASH_DUMP
45519 - default y
45520 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45521 + default n
45522 help
45523 Exports the dump image of crashed kernel in ELF format.
45524
45525 @@ -59,8 +59,8 @@ config PROC_SYSCTL
45526 limited in memory.
45527
45528 config PROC_PAGE_MONITOR
45529 - default y
45530 - depends on PROC_FS && MMU
45531 + default n
45532 + depends on PROC_FS && MMU && !GRKERNSEC
45533 bool "Enable /proc page monitoring" if EXPERT
45534 help
45535 Various /proc files exist to monitor process memory utilization:
45536 diff --git a/fs/proc/array.c b/fs/proc/array.c
45537 index 3a1dafd..1456746 100644
45538 --- a/fs/proc/array.c
45539 +++ b/fs/proc/array.c
45540 @@ -60,6 +60,7 @@
45541 #include <linux/tty.h>
45542 #include <linux/string.h>
45543 #include <linux/mman.h>
45544 +#include <linux/grsecurity.h>
45545 #include <linux/proc_fs.h>
45546 #include <linux/ioport.h>
45547 #include <linux/uaccess.h>
45548 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
45549 seq_putc(m, '\n');
45550 }
45551
45552 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45553 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
45554 +{
45555 + if (p->mm)
45556 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45557 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45558 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45559 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45560 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45561 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45562 + else
45563 + seq_printf(m, "PaX:\t-----\n");
45564 +}
45565 +#endif
45566 +
45567 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45568 struct pid *pid, struct task_struct *task)
45569 {
45570 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45571 task_cpus_allowed(m, task);
45572 cpuset_task_status_allowed(m, task);
45573 task_context_switch_counts(m, task);
45574 +
45575 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45576 + task_pax(m, task);
45577 +#endif
45578 +
45579 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45580 + task_grsec_rbac(m, task);
45581 +#endif
45582 +
45583 return 0;
45584 }
45585
45586 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45587 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45588 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45589 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45590 +#endif
45591 +
45592 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45593 struct pid *pid, struct task_struct *task, int whole)
45594 {
45595 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45596 char tcomm[sizeof(task->comm)];
45597 unsigned long flags;
45598
45599 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45600 + if (current->exec_id != m->exec_id) {
45601 + gr_log_badprocpid("stat");
45602 + return 0;
45603 + }
45604 +#endif
45605 +
45606 state = *get_task_state(task);
45607 vsize = eip = esp = 0;
45608 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45609 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45610 gtime = task->gtime;
45611 }
45612
45613 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45614 + if (PAX_RAND_FLAGS(mm)) {
45615 + eip = 0;
45616 + esp = 0;
45617 + wchan = 0;
45618 + }
45619 +#endif
45620 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45621 + wchan = 0;
45622 + eip =0;
45623 + esp =0;
45624 +#endif
45625 +
45626 /* scale priority and nice values from timeslices to -20..20 */
45627 /* to make it look like a "normal" Unix priority/nice value */
45628 priority = task_prio(task);
45629 @@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45630 vsize,
45631 mm ? get_mm_rss(mm) : 0,
45632 rsslim,
45633 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45634 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45635 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45636 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45637 +#else
45638 mm ? (permitted ? mm->start_code : 1) : 0,
45639 mm ? (permitted ? mm->end_code : 1) : 0,
45640 (permitted && mm) ? mm->start_stack : 0,
45641 +#endif
45642 esp,
45643 eip,
45644 /* The signal information here is obsolete.
45645 @@ -535,6 +592,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45646 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
45647 struct mm_struct *mm = get_task_mm(task);
45648
45649 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45650 + if (current->exec_id != m->exec_id) {
45651 + gr_log_badprocpid("statm");
45652 + return 0;
45653 + }
45654 +#endif
45655 +
45656 if (mm) {
45657 size = task_statm(mm, &shared, &text, &data, &resident);
45658 mmput(mm);
45659 @@ -544,3 +608,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45660
45661 return 0;
45662 }
45663 +
45664 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45665 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45666 +{
45667 + u32 curr_ip = 0;
45668 + unsigned long flags;
45669 +
45670 + if (lock_task_sighand(task, &flags)) {
45671 + curr_ip = task->signal->curr_ip;
45672 + unlock_task_sighand(task, &flags);
45673 + }
45674 +
45675 + return sprintf(buffer, "%pI4\n", &curr_ip);
45676 +}
45677 +#endif
45678 diff --git a/fs/proc/base.c b/fs/proc/base.c
45679 index 1ace83d..f5e575d 100644
45680 --- a/fs/proc/base.c
45681 +++ b/fs/proc/base.c
45682 @@ -107,6 +107,22 @@ struct pid_entry {
45683 union proc_op op;
45684 };
45685
45686 +struct getdents_callback {
45687 + struct linux_dirent __user * current_dir;
45688 + struct linux_dirent __user * previous;
45689 + struct file * file;
45690 + int count;
45691 + int error;
45692 +};
45693 +
45694 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45695 + loff_t offset, u64 ino, unsigned int d_type)
45696 +{
45697 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
45698 + buf->error = -EINVAL;
45699 + return 0;
45700 +}
45701 +
45702 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45703 .name = (NAME), \
45704 .len = sizeof(NAME) - 1, \
45705 @@ -194,26 +210,6 @@ static int proc_root_link(struct inode *inode, struct path *path)
45706 return result;
45707 }
45708
45709 -static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
45710 -{
45711 - struct mm_struct *mm;
45712 - int err;
45713 -
45714 - err = mutex_lock_killable(&task->signal->cred_guard_mutex);
45715 - if (err)
45716 - return ERR_PTR(err);
45717 -
45718 - mm = get_task_mm(task);
45719 - if (mm && mm != current->mm &&
45720 - !ptrace_may_access(task, mode)) {
45721 - mmput(mm);
45722 - mm = ERR_PTR(-EACCES);
45723 - }
45724 - mutex_unlock(&task->signal->cred_guard_mutex);
45725 -
45726 - return mm;
45727 -}
45728 -
45729 struct mm_struct *mm_for_maps(struct task_struct *task)
45730 {
45731 return mm_access(task, PTRACE_MODE_READ);
45732 @@ -229,6 +225,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
45733 if (!mm->arg_end)
45734 goto out_mm; /* Shh! No looking before we're done */
45735
45736 + if (gr_acl_handle_procpidmem(task))
45737 + goto out_mm;
45738 +
45739 len = mm->arg_end - mm->arg_start;
45740
45741 if (len > PAGE_SIZE)
45742 @@ -256,12 +255,28 @@ out:
45743 return res;
45744 }
45745
45746 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45747 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45748 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45749 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45750 +#endif
45751 +
45752 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45753 {
45754 struct mm_struct *mm = mm_for_maps(task);
45755 int res = PTR_ERR(mm);
45756 if (mm && !IS_ERR(mm)) {
45757 unsigned int nwords = 0;
45758 +
45759 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45760 + /* allow if we're currently ptracing this task */
45761 + if (PAX_RAND_FLAGS(mm) &&
45762 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45763 + mmput(mm);
45764 + return 0;
45765 + }
45766 +#endif
45767 +
45768 do {
45769 nwords += 2;
45770 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45771 @@ -275,7 +290,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
45772 }
45773
45774
45775 -#ifdef CONFIG_KALLSYMS
45776 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45777 /*
45778 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45779 * Returns the resolved symbol. If that fails, simply return the address.
45780 @@ -314,7 +329,7 @@ static void unlock_trace(struct task_struct *task)
45781 mutex_unlock(&task->signal->cred_guard_mutex);
45782 }
45783
45784 -#ifdef CONFIG_STACKTRACE
45785 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45786
45787 #define MAX_STACK_TRACE_DEPTH 64
45788
45789 @@ -505,7 +520,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
45790 return count;
45791 }
45792
45793 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45794 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45795 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45796 {
45797 long nr;
45798 @@ -534,7 +549,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
45799 /************************************************************************/
45800
45801 /* permission checks */
45802 -static int proc_fd_access_allowed(struct inode *inode)
45803 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45804 {
45805 struct task_struct *task;
45806 int allowed = 0;
45807 @@ -544,7 +559,10 @@ static int proc_fd_access_allowed(struct inode *inode)
45808 */
45809 task = get_proc_task(inode);
45810 if (task) {
45811 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45812 + if (log)
45813 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45814 + else
45815 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45816 put_task_struct(task);
45817 }
45818 return allowed;
45819 @@ -786,6 +804,10 @@ static int mem_open(struct inode* inode, struct file* file)
45820 file->f_mode |= FMODE_UNSIGNED_OFFSET;
45821 file->private_data = mm;
45822
45823 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45824 + file->f_version = current->exec_id;
45825 +#endif
45826 +
45827 return 0;
45828 }
45829
45830 @@ -797,6 +819,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
45831 ssize_t copied;
45832 char *page;
45833
45834 +#ifdef CONFIG_GRKERNSEC
45835 + if (write)
45836 + return -EPERM;
45837 +#endif
45838 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45839 + if (file->f_version != current->exec_id) {
45840 + gr_log_badprocpid("mem");
45841 + return 0;
45842 + }
45843 +#endif
45844 +
45845 if (!mm)
45846 return 0;
45847
45848 @@ -897,6 +930,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
45849 if (!task)
45850 goto out_no_task;
45851
45852 + if (gr_acl_handle_procpidmem(task))
45853 + goto out;
45854 +
45855 ret = -ENOMEM;
45856 page = (char *)__get_free_page(GFP_TEMPORARY);
45857 if (!page)
45858 @@ -1519,7 +1555,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
45859 path_put(&nd->path);
45860
45861 /* Are we allowed to snoop on the tasks file descriptors? */
45862 - if (!proc_fd_access_allowed(inode))
45863 + if (!proc_fd_access_allowed(inode,0))
45864 goto out;
45865
45866 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45867 @@ -1558,8 +1594,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
45868 struct path path;
45869
45870 /* Are we allowed to snoop on the tasks file descriptors? */
45871 - if (!proc_fd_access_allowed(inode))
45872 - goto out;
45873 + /* logging this is needed for learning on chromium to work properly,
45874 + but we don't want to flood the logs from 'ps' which does a readlink
45875 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45876 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
45877 + */
45878 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45879 + if (!proc_fd_access_allowed(inode,0))
45880 + goto out;
45881 + } else {
45882 + if (!proc_fd_access_allowed(inode,1))
45883 + goto out;
45884 + }
45885
45886 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45887 if (error)
45888 @@ -1624,7 +1670,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
45889 rcu_read_lock();
45890 cred = __task_cred(task);
45891 inode->i_uid = cred->euid;
45892 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45893 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45894 +#else
45895 inode->i_gid = cred->egid;
45896 +#endif
45897 rcu_read_unlock();
45898 }
45899 security_task_to_inode(task, inode);
45900 @@ -1642,6 +1692,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45901 struct inode *inode = dentry->d_inode;
45902 struct task_struct *task;
45903 const struct cred *cred;
45904 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45905 + const struct cred *tmpcred = current_cred();
45906 +#endif
45907
45908 generic_fillattr(inode, stat);
45909
45910 @@ -1649,13 +1702,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45911 stat->uid = 0;
45912 stat->gid = 0;
45913 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45914 +
45915 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45916 + rcu_read_unlock();
45917 + return -ENOENT;
45918 + }
45919 +
45920 if (task) {
45921 + cred = __task_cred(task);
45922 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45923 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45924 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45925 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45926 +#endif
45927 + ) {
45928 +#endif
45929 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45930 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45931 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45932 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45933 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45934 +#endif
45935 task_dumpable(task)) {
45936 - cred = __task_cred(task);
45937 stat->uid = cred->euid;
45938 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45939 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45940 +#else
45941 stat->gid = cred->egid;
45942 +#endif
45943 }
45944 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45945 + } else {
45946 + rcu_read_unlock();
45947 + return -ENOENT;
45948 + }
45949 +#endif
45950 }
45951 rcu_read_unlock();
45952 return 0;
45953 @@ -1692,11 +1773,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
45954
45955 if (task) {
45956 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45957 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45958 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45959 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45960 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45961 +#endif
45962 task_dumpable(task)) {
45963 rcu_read_lock();
45964 cred = __task_cred(task);
45965 inode->i_uid = cred->euid;
45966 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45967 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45968 +#else
45969 inode->i_gid = cred->egid;
45970 +#endif
45971 rcu_read_unlock();
45972 } else {
45973 inode->i_uid = 0;
45974 @@ -1814,7 +1904,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
45975 int fd = proc_fd(inode);
45976
45977 if (task) {
45978 - files = get_files_struct(task);
45979 + if (!gr_acl_handle_procpidmem(task))
45980 + files = get_files_struct(task);
45981 put_task_struct(task);
45982 }
45983 if (files) {
45984 @@ -2082,11 +2173,21 @@ static const struct file_operations proc_fd_operations = {
45985 */
45986 static int proc_fd_permission(struct inode *inode, int mask)
45987 {
45988 + struct task_struct *task;
45989 int rv = generic_permission(inode, mask);
45990 - if (rv == 0)
45991 - return 0;
45992 +
45993 if (task_pid(current) == proc_pid(inode))
45994 rv = 0;
45995 +
45996 + task = get_proc_task(inode);
45997 + if (task == NULL)
45998 + return rv;
45999 +
46000 + if (gr_acl_handle_procpidmem(task))
46001 + rv = -EACCES;
46002 +
46003 + put_task_struct(task);
46004 +
46005 return rv;
46006 }
46007
46008 @@ -2196,6 +2297,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
46009 if (!task)
46010 goto out_no_task;
46011
46012 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46013 + goto out;
46014 +
46015 /*
46016 * Yes, it does not scale. And it should not. Don't add
46017 * new entries into /proc/<tgid>/ without very good reasons.
46018 @@ -2240,6 +2344,9 @@ static int proc_pident_readdir(struct file *filp,
46019 if (!task)
46020 goto out_no_task;
46021
46022 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46023 + goto out;
46024 +
46025 ret = 0;
46026 i = filp->f_pos;
46027 switch (i) {
46028 @@ -2510,7 +2617,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
46029 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
46030 void *cookie)
46031 {
46032 - char *s = nd_get_link(nd);
46033 + const char *s = nd_get_link(nd);
46034 if (!IS_ERR(s))
46035 __putname(s);
46036 }
46037 @@ -2708,7 +2815,7 @@ static const struct pid_entry tgid_base_stuff[] = {
46038 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
46039 #endif
46040 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46041 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46042 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46043 INF("syscall", S_IRUGO, proc_pid_syscall),
46044 #endif
46045 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46046 @@ -2733,10 +2840,10 @@ static const struct pid_entry tgid_base_stuff[] = {
46047 #ifdef CONFIG_SECURITY
46048 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46049 #endif
46050 -#ifdef CONFIG_KALLSYMS
46051 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46052 INF("wchan", S_IRUGO, proc_pid_wchan),
46053 #endif
46054 -#ifdef CONFIG_STACKTRACE
46055 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46056 ONE("stack", S_IRUGO, proc_pid_stack),
46057 #endif
46058 #ifdef CONFIG_SCHEDSTATS
46059 @@ -2770,6 +2877,9 @@ static const struct pid_entry tgid_base_stuff[] = {
46060 #ifdef CONFIG_HARDWALL
46061 INF("hardwall", S_IRUGO, proc_pid_hardwall),
46062 #endif
46063 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46064 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
46065 +#endif
46066 };
46067
46068 static int proc_tgid_base_readdir(struct file * filp,
46069 @@ -2895,7 +3005,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
46070 if (!inode)
46071 goto out;
46072
46073 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46074 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
46075 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46076 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46077 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
46078 +#else
46079 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
46080 +#endif
46081 inode->i_op = &proc_tgid_base_inode_operations;
46082 inode->i_fop = &proc_tgid_base_operations;
46083 inode->i_flags|=S_IMMUTABLE;
46084 @@ -2937,7 +3054,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
46085 if (!task)
46086 goto out;
46087
46088 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46089 + goto out_put_task;
46090 +
46091 result = proc_pid_instantiate(dir, dentry, task, NULL);
46092 +out_put_task:
46093 put_task_struct(task);
46094 out:
46095 return result;
46096 @@ -3002,6 +3123,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
46097 {
46098 unsigned int nr;
46099 struct task_struct *reaper;
46100 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46101 + const struct cred *tmpcred = current_cred();
46102 + const struct cred *itercred;
46103 +#endif
46104 + filldir_t __filldir = filldir;
46105 struct tgid_iter iter;
46106 struct pid_namespace *ns;
46107
46108 @@ -3025,8 +3151,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
46109 for (iter = next_tgid(ns, iter);
46110 iter.task;
46111 iter.tgid += 1, iter = next_tgid(ns, iter)) {
46112 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46113 + rcu_read_lock();
46114 + itercred = __task_cred(iter.task);
46115 +#endif
46116 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
46117 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46118 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
46119 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46120 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
46121 +#endif
46122 + )
46123 +#endif
46124 + )
46125 + __filldir = &gr_fake_filldir;
46126 + else
46127 + __filldir = filldir;
46128 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46129 + rcu_read_unlock();
46130 +#endif
46131 filp->f_pos = iter.tgid + TGID_OFFSET;
46132 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
46133 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
46134 put_task_struct(iter.task);
46135 goto out;
46136 }
46137 @@ -3054,7 +3199,7 @@ static const struct pid_entry tid_base_stuff[] = {
46138 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
46139 #endif
46140 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46141 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46142 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46143 INF("syscall", S_IRUGO, proc_pid_syscall),
46144 #endif
46145 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46146 @@ -3078,10 +3223,10 @@ static const struct pid_entry tid_base_stuff[] = {
46147 #ifdef CONFIG_SECURITY
46148 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46149 #endif
46150 -#ifdef CONFIG_KALLSYMS
46151 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46152 INF("wchan", S_IRUGO, proc_pid_wchan),
46153 #endif
46154 -#ifdef CONFIG_STACKTRACE
46155 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46156 ONE("stack", S_IRUGO, proc_pid_stack),
46157 #endif
46158 #ifdef CONFIG_SCHEDSTATS
46159 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
46160 index 82676e3..5f8518a 100644
46161 --- a/fs/proc/cmdline.c
46162 +++ b/fs/proc/cmdline.c
46163 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
46164
46165 static int __init proc_cmdline_init(void)
46166 {
46167 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46168 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
46169 +#else
46170 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
46171 +#endif
46172 return 0;
46173 }
46174 module_init(proc_cmdline_init);
46175 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
46176 index b143471..bb105e5 100644
46177 --- a/fs/proc/devices.c
46178 +++ b/fs/proc/devices.c
46179 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
46180
46181 static int __init proc_devices_init(void)
46182 {
46183 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46184 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
46185 +#else
46186 proc_create("devices", 0, NULL, &proc_devinfo_operations);
46187 +#endif
46188 return 0;
46189 }
46190 module_init(proc_devices_init);
46191 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
46192 index 7737c54..7172574 100644
46193 --- a/fs/proc/inode.c
46194 +++ b/fs/proc/inode.c
46195 @@ -18,12 +18,18 @@
46196 #include <linux/module.h>
46197 #include <linux/sysctl.h>
46198 #include <linux/slab.h>
46199 +#include <linux/grsecurity.h>
46200
46201 #include <asm/system.h>
46202 #include <asm/uaccess.h>
46203
46204 #include "internal.h"
46205
46206 +#ifdef CONFIG_PROC_SYSCTL
46207 +extern const struct inode_operations proc_sys_inode_operations;
46208 +extern const struct inode_operations proc_sys_dir_operations;
46209 +#endif
46210 +
46211 static void proc_evict_inode(struct inode *inode)
46212 {
46213 struct proc_dir_entry *de;
46214 @@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
46215 ns_ops = PROC_I(inode)->ns_ops;
46216 if (ns_ops && ns_ops->put)
46217 ns_ops->put(PROC_I(inode)->ns);
46218 +
46219 +#ifdef CONFIG_PROC_SYSCTL
46220 + if (inode->i_op == &proc_sys_inode_operations ||
46221 + inode->i_op == &proc_sys_dir_operations)
46222 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
46223 +#endif
46224 +
46225 }
46226
46227 static struct kmem_cache * proc_inode_cachep;
46228 @@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
46229 if (de->mode) {
46230 inode->i_mode = de->mode;
46231 inode->i_uid = de->uid;
46232 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46233 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46234 +#else
46235 inode->i_gid = de->gid;
46236 +#endif
46237 }
46238 if (de->size)
46239 inode->i_size = de->size;
46240 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
46241 index 7838e5c..ff92cbc 100644
46242 --- a/fs/proc/internal.h
46243 +++ b/fs/proc/internal.h
46244 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46245 struct pid *pid, struct task_struct *task);
46246 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46247 struct pid *pid, struct task_struct *task);
46248 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46249 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
46250 +#endif
46251 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
46252
46253 extern const struct file_operations proc_maps_operations;
46254 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
46255 index d245cb2..f4e8498 100644
46256 --- a/fs/proc/kcore.c
46257 +++ b/fs/proc/kcore.c
46258 @@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46259 * the addresses in the elf_phdr on our list.
46260 */
46261 start = kc_offset_to_vaddr(*fpos - elf_buflen);
46262 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
46263 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
46264 + if (tsz > buflen)
46265 tsz = buflen;
46266 -
46267 +
46268 while (buflen) {
46269 struct kcore_list *m;
46270
46271 @@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46272 kfree(elf_buf);
46273 } else {
46274 if (kern_addr_valid(start)) {
46275 - unsigned long n;
46276 + char *elf_buf;
46277 + mm_segment_t oldfs;
46278
46279 - n = copy_to_user(buffer, (char *)start, tsz);
46280 - /*
46281 - * We cannot distingush between fault on source
46282 - * and fault on destination. When this happens
46283 - * we clear too and hope it will trigger the
46284 - * EFAULT again.
46285 - */
46286 - if (n) {
46287 - if (clear_user(buffer + tsz - n,
46288 - n))
46289 + elf_buf = kmalloc(tsz, GFP_KERNEL);
46290 + if (!elf_buf)
46291 + return -ENOMEM;
46292 + oldfs = get_fs();
46293 + set_fs(KERNEL_DS);
46294 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46295 + set_fs(oldfs);
46296 + if (copy_to_user(buffer, elf_buf, tsz)) {
46297 + kfree(elf_buf);
46298 return -EFAULT;
46299 + }
46300 }
46301 + set_fs(oldfs);
46302 + kfree(elf_buf);
46303 } else {
46304 if (clear_user(buffer, tsz))
46305 return -EFAULT;
46306 @@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46307
46308 static int open_kcore(struct inode *inode, struct file *filp)
46309 {
46310 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46311 + return -EPERM;
46312 +#endif
46313 if (!capable(CAP_SYS_RAWIO))
46314 return -EPERM;
46315 if (kcore_need_update)
46316 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
46317 index 80e4645..53e5fcf 100644
46318 --- a/fs/proc/meminfo.c
46319 +++ b/fs/proc/meminfo.c
46320 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
46321 vmi.used >> 10,
46322 vmi.largest_chunk >> 10
46323 #ifdef CONFIG_MEMORY_FAILURE
46324 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46325 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46326 #endif
46327 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46328 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46329 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
46330 index b1822dd..df622cb 100644
46331 --- a/fs/proc/nommu.c
46332 +++ b/fs/proc/nommu.c
46333 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
46334 if (len < 1)
46335 len = 1;
46336 seq_printf(m, "%*c", len, ' ');
46337 - seq_path(m, &file->f_path, "");
46338 + seq_path(m, &file->f_path, "\n\\");
46339 }
46340
46341 seq_putc(m, '\n');
46342 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
46343 index f738024..876984a 100644
46344 --- a/fs/proc/proc_net.c
46345 +++ b/fs/proc/proc_net.c
46346 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
46347 struct task_struct *task;
46348 struct nsproxy *ns;
46349 struct net *net = NULL;
46350 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46351 + const struct cred *cred = current_cred();
46352 +#endif
46353 +
46354 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46355 + if (cred->fsuid)
46356 + return net;
46357 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46358 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46359 + return net;
46360 +#endif
46361
46362 rcu_read_lock();
46363 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46364 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
46365 index a6b6217..1e0579d 100644
46366 --- a/fs/proc/proc_sysctl.c
46367 +++ b/fs/proc/proc_sysctl.c
46368 @@ -9,11 +9,13 @@
46369 #include <linux/namei.h>
46370 #include "internal.h"
46371
46372 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46373 +
46374 static const struct dentry_operations proc_sys_dentry_operations;
46375 static const struct file_operations proc_sys_file_operations;
46376 -static const struct inode_operations proc_sys_inode_operations;
46377 +const struct inode_operations proc_sys_inode_operations;
46378 static const struct file_operations proc_sys_dir_file_operations;
46379 -static const struct inode_operations proc_sys_dir_operations;
46380 +const struct inode_operations proc_sys_dir_operations;
46381
46382 void proc_sys_poll_notify(struct ctl_table_poll *poll)
46383 {
46384 @@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
46385
46386 err = NULL;
46387 d_set_d_op(dentry, &proc_sys_dentry_operations);
46388 +
46389 + gr_handle_proc_create(dentry, inode);
46390 +
46391 d_add(dentry, inode);
46392
46393 + if (gr_handle_sysctl(p, MAY_EXEC))
46394 + err = ERR_PTR(-ENOENT);
46395 +
46396 out:
46397 sysctl_head_finish(head);
46398 return err;
46399 @@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
46400 if (!table->proc_handler)
46401 goto out;
46402
46403 +#ifdef CONFIG_GRKERNSEC
46404 + error = -EPERM;
46405 + if (write && !capable(CAP_SYS_ADMIN))
46406 + goto out;
46407 +#endif
46408 +
46409 /* careful: calling conventions are nasty here */
46410 res = count;
46411 error = table->proc_handler(table, write, buf, &res, ppos);
46412 @@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
46413 return -ENOMEM;
46414 } else {
46415 d_set_d_op(child, &proc_sys_dentry_operations);
46416 +
46417 + gr_handle_proc_create(child, inode);
46418 +
46419 d_add(child, inode);
46420 }
46421 } else {
46422 @@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
46423 if (*pos < file->f_pos)
46424 continue;
46425
46426 + if (gr_handle_sysctl(table, 0))
46427 + continue;
46428 +
46429 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46430 if (res)
46431 return res;
46432 @@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
46433 if (IS_ERR(head))
46434 return PTR_ERR(head);
46435
46436 + if (table && gr_handle_sysctl(table, MAY_EXEC))
46437 + return -ENOENT;
46438 +
46439 generic_fillattr(inode, stat);
46440 if (table)
46441 stat->mode = (stat->mode & S_IFMT) | table->mode;
46442 @@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
46443 .llseek = generic_file_llseek,
46444 };
46445
46446 -static const struct inode_operations proc_sys_inode_operations = {
46447 +const struct inode_operations proc_sys_inode_operations = {
46448 .permission = proc_sys_permission,
46449 .setattr = proc_sys_setattr,
46450 .getattr = proc_sys_getattr,
46451 };
46452
46453 -static const struct inode_operations proc_sys_dir_operations = {
46454 +const struct inode_operations proc_sys_dir_operations = {
46455 .lookup = proc_sys_lookup,
46456 .permission = proc_sys_permission,
46457 .setattr = proc_sys_setattr,
46458 diff --git a/fs/proc/root.c b/fs/proc/root.c
46459 index 03102d9..4ae347e 100644
46460 --- a/fs/proc/root.c
46461 +++ b/fs/proc/root.c
46462 @@ -121,7 +121,15 @@ void __init proc_root_init(void)
46463 #ifdef CONFIG_PROC_DEVICETREE
46464 proc_device_tree_init();
46465 #endif
46466 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46467 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46468 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46469 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46470 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46471 +#endif
46472 +#else
46473 proc_mkdir("bus", NULL);
46474 +#endif
46475 proc_sys_init();
46476 }
46477
46478 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
46479 index 7dcd2a2..b2f410e 100644
46480 --- a/fs/proc/task_mmu.c
46481 +++ b/fs/proc/task_mmu.c
46482 @@ -11,6 +11,7 @@
46483 #include <linux/rmap.h>
46484 #include <linux/swap.h>
46485 #include <linux/swapops.h>
46486 +#include <linux/grsecurity.h>
46487
46488 #include <asm/elf.h>
46489 #include <asm/uaccess.h>
46490 @@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46491 "VmExe:\t%8lu kB\n"
46492 "VmLib:\t%8lu kB\n"
46493 "VmPTE:\t%8lu kB\n"
46494 - "VmSwap:\t%8lu kB\n",
46495 - hiwater_vm << (PAGE_SHIFT-10),
46496 + "VmSwap:\t%8lu kB\n"
46497 +
46498 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46499 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46500 +#endif
46501 +
46502 + ,hiwater_vm << (PAGE_SHIFT-10),
46503 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46504 mm->locked_vm << (PAGE_SHIFT-10),
46505 mm->pinned_vm << (PAGE_SHIFT-10),
46506 @@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46507 data << (PAGE_SHIFT-10),
46508 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46509 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46510 - swap << (PAGE_SHIFT-10));
46511 + swap << (PAGE_SHIFT-10)
46512 +
46513 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46514 + , mm->context.user_cs_base, mm->context.user_cs_limit
46515 +#endif
46516 +
46517 + );
46518 }
46519
46520 unsigned long task_vsize(struct mm_struct *mm)
46521 @@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
46522 return ret;
46523 }
46524
46525 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46526 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46527 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
46528 + _mm->pax_flags & MF_PAX_SEGMEXEC))
46529 +#endif
46530 +
46531 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46532 {
46533 struct mm_struct *mm = vma->vm_mm;
46534 @@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46535 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46536 }
46537
46538 - /* We don't show the stack guard page in /proc/maps */
46539 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46540 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46541 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46542 +#else
46543 start = vma->vm_start;
46544 - if (stack_guard_page_start(vma, start))
46545 - start += PAGE_SIZE;
46546 end = vma->vm_end;
46547 - if (stack_guard_page_end(vma, end))
46548 - end -= PAGE_SIZE;
46549 +#endif
46550
46551 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46552 start,
46553 @@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46554 flags & VM_WRITE ? 'w' : '-',
46555 flags & VM_EXEC ? 'x' : '-',
46556 flags & VM_MAYSHARE ? 's' : 'p',
46557 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46558 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46559 +#else
46560 pgoff,
46561 +#endif
46562 MAJOR(dev), MINOR(dev), ino, &len);
46563
46564 /*
46565 @@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46566 */
46567 if (file) {
46568 pad_len_spaces(m, len);
46569 - seq_path(m, &file->f_path, "\n");
46570 + seq_path(m, &file->f_path, "\n\\");
46571 } else {
46572 const char *name = arch_vma_name(vma);
46573 if (!name) {
46574 @@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46575 if (vma->vm_start <= mm->brk &&
46576 vma->vm_end >= mm->start_brk) {
46577 name = "[heap]";
46578 - } else if (vma->vm_start <= mm->start_stack &&
46579 - vma->vm_end >= mm->start_stack) {
46580 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46581 + (vma->vm_start <= mm->start_stack &&
46582 + vma->vm_end >= mm->start_stack)) {
46583 name = "[stack]";
46584 }
46585 } else {
46586 @@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
46587 struct proc_maps_private *priv = m->private;
46588 struct task_struct *task = priv->task;
46589
46590 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46591 + if (current->exec_id != m->exec_id) {
46592 + gr_log_badprocpid("maps");
46593 + return 0;
46594 + }
46595 +#endif
46596 +
46597 show_map_vma(m, vma);
46598
46599 if (m->count < m->size) /* vma is copied successfully */
46600 @@ -434,12 +464,23 @@ static int show_smap(struct seq_file *m, void *v)
46601 .private = &mss,
46602 };
46603
46604 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46605 + if (current->exec_id != m->exec_id) {
46606 + gr_log_badprocpid("smaps");
46607 + return 0;
46608 + }
46609 +#endif
46610 memset(&mss, 0, sizeof mss);
46611 - mss.vma = vma;
46612 - /* mmap_sem is held in m_start */
46613 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46614 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46615 -
46616 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46617 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46618 +#endif
46619 + mss.vma = vma;
46620 + /* mmap_sem is held in m_start */
46621 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46622 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46623 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46624 + }
46625 +#endif
46626 show_map_vma(m, vma);
46627
46628 seq_printf(m,
46629 @@ -457,7 +498,11 @@ static int show_smap(struct seq_file *m, void *v)
46630 "KernelPageSize: %8lu kB\n"
46631 "MMUPageSize: %8lu kB\n"
46632 "Locked: %8lu kB\n",
46633 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46634 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46635 +#else
46636 (vma->vm_end - vma->vm_start) >> 10,
46637 +#endif
46638 mss.resident >> 10,
46639 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46640 mss.shared_clean >> 10,
46641 @@ -1015,6 +1060,13 @@ static int show_numa_map(struct seq_file *m, void *v)
46642 int n;
46643 char buffer[50];
46644
46645 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46646 + if (current->exec_id != m->exec_id) {
46647 + gr_log_badprocpid("numa_maps");
46648 + return 0;
46649 + }
46650 +#endif
46651 +
46652 if (!mm)
46653 return 0;
46654
46655 @@ -1032,11 +1084,15 @@ static int show_numa_map(struct seq_file *m, void *v)
46656 mpol_to_str(buffer, sizeof(buffer), pol, 0);
46657 mpol_cond_put(pol);
46658
46659 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46660 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
46661 +#else
46662 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
46663 +#endif
46664
46665 if (file) {
46666 seq_printf(m, " file=");
46667 - seq_path(m, &file->f_path, "\n\t= ");
46668 + seq_path(m, &file->f_path, "\n\t\\= ");
46669 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46670 seq_printf(m, " heap");
46671 } else if (vma->vm_start <= mm->start_stack &&
46672 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
46673 index 980de54..2a4db5f 100644
46674 --- a/fs/proc/task_nommu.c
46675 +++ b/fs/proc/task_nommu.c
46676 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46677 else
46678 bytes += kobjsize(mm);
46679
46680 - if (current->fs && current->fs->users > 1)
46681 + if (current->fs && atomic_read(&current->fs->users) > 1)
46682 sbytes += kobjsize(current->fs);
46683 else
46684 bytes += kobjsize(current->fs);
46685 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
46686
46687 if (file) {
46688 pad_len_spaces(m, len);
46689 - seq_path(m, &file->f_path, "");
46690 + seq_path(m, &file->f_path, "\n\\");
46691 } else if (mm) {
46692 if (vma->vm_start <= mm->start_stack &&
46693 vma->vm_end >= mm->start_stack) {
46694 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
46695 index d67908b..d13f6a6 100644
46696 --- a/fs/quota/netlink.c
46697 +++ b/fs/quota/netlink.c
46698 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
46699 void quota_send_warning(short type, unsigned int id, dev_t dev,
46700 const char warntype)
46701 {
46702 - static atomic_t seq;
46703 + static atomic_unchecked_t seq;
46704 struct sk_buff *skb;
46705 void *msg_head;
46706 int ret;
46707 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
46708 "VFS: Not enough memory to send quota warning.\n");
46709 return;
46710 }
46711 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46712 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46713 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46714 if (!msg_head) {
46715 printk(KERN_ERR
46716 diff --git a/fs/readdir.c b/fs/readdir.c
46717 index 356f715..c918d38 100644
46718 --- a/fs/readdir.c
46719 +++ b/fs/readdir.c
46720 @@ -17,6 +17,7 @@
46721 #include <linux/security.h>
46722 #include <linux/syscalls.h>
46723 #include <linux/unistd.h>
46724 +#include <linux/namei.h>
46725
46726 #include <asm/uaccess.h>
46727
46728 @@ -67,6 +68,7 @@ struct old_linux_dirent {
46729
46730 struct readdir_callback {
46731 struct old_linux_dirent __user * dirent;
46732 + struct file * file;
46733 int result;
46734 };
46735
46736 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
46737 buf->result = -EOVERFLOW;
46738 return -EOVERFLOW;
46739 }
46740 +
46741 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46742 + return 0;
46743 +
46744 buf->result++;
46745 dirent = buf->dirent;
46746 if (!access_ok(VERIFY_WRITE, dirent,
46747 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
46748
46749 buf.result = 0;
46750 buf.dirent = dirent;
46751 + buf.file = file;
46752
46753 error = vfs_readdir(file, fillonedir, &buf);
46754 if (buf.result)
46755 @@ -142,6 +149,7 @@ struct linux_dirent {
46756 struct getdents_callback {
46757 struct linux_dirent __user * current_dir;
46758 struct linux_dirent __user * previous;
46759 + struct file * file;
46760 int count;
46761 int error;
46762 };
46763 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
46764 buf->error = -EOVERFLOW;
46765 return -EOVERFLOW;
46766 }
46767 +
46768 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46769 + return 0;
46770 +
46771 dirent = buf->previous;
46772 if (dirent) {
46773 if (__put_user(offset, &dirent->d_off))
46774 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
46775 buf.previous = NULL;
46776 buf.count = count;
46777 buf.error = 0;
46778 + buf.file = file;
46779
46780 error = vfs_readdir(file, filldir, &buf);
46781 if (error >= 0)
46782 @@ -229,6 +242,7 @@ out:
46783 struct getdents_callback64 {
46784 struct linux_dirent64 __user * current_dir;
46785 struct linux_dirent64 __user * previous;
46786 + struct file *file;
46787 int count;
46788 int error;
46789 };
46790 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
46791 buf->error = -EINVAL; /* only used if we fail.. */
46792 if (reclen > buf->count)
46793 return -EINVAL;
46794 +
46795 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46796 + return 0;
46797 +
46798 dirent = buf->previous;
46799 if (dirent) {
46800 if (__put_user(offset, &dirent->d_off))
46801 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46802
46803 buf.current_dir = dirent;
46804 buf.previous = NULL;
46805 + buf.file = file;
46806 buf.count = count;
46807 buf.error = 0;
46808
46809 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46810 error = buf.error;
46811 lastdirent = buf.previous;
46812 if (lastdirent) {
46813 - typeof(lastdirent->d_off) d_off = file->f_pos;
46814 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46815 if (__put_user(d_off, &lastdirent->d_off))
46816 error = -EFAULT;
46817 else
46818 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
46819 index 60c0804..d814f98 100644
46820 --- a/fs/reiserfs/do_balan.c
46821 +++ b/fs/reiserfs/do_balan.c
46822 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
46823 return;
46824 }
46825
46826 - atomic_inc(&(fs_generation(tb->tb_sb)));
46827 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46828 do_balance_starts(tb);
46829
46830 /* balance leaf returns 0 except if combining L R and S into
46831 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
46832 index 7a99811..a7c96c4 100644
46833 --- a/fs/reiserfs/procfs.c
46834 +++ b/fs/reiserfs/procfs.c
46835 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
46836 "SMALL_TAILS " : "NO_TAILS ",
46837 replay_only(sb) ? "REPLAY_ONLY " : "",
46838 convert_reiserfs(sb) ? "CONV " : "",
46839 - atomic_read(&r->s_generation_counter),
46840 + atomic_read_unchecked(&r->s_generation_counter),
46841 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46842 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46843 SF(s_good_search_by_key_reada), SF(s_bmaps),
46844 diff --git a/fs/select.c b/fs/select.c
46845 index d33418f..2a5345e 100644
46846 --- a/fs/select.c
46847 +++ b/fs/select.c
46848 @@ -20,6 +20,7 @@
46849 #include <linux/module.h>
46850 #include <linux/slab.h>
46851 #include <linux/poll.h>
46852 +#include <linux/security.h>
46853 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46854 #include <linux/file.h>
46855 #include <linux/fdtable.h>
46856 @@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
46857 struct poll_list *walk = head;
46858 unsigned long todo = nfds;
46859
46860 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46861 if (nfds > rlimit(RLIMIT_NOFILE))
46862 return -EINVAL;
46863
46864 diff --git a/fs/seq_file.c b/fs/seq_file.c
46865 index dba43c3..9fb8511 100644
46866 --- a/fs/seq_file.c
46867 +++ b/fs/seq_file.c
46868 @@ -9,6 +9,7 @@
46869 #include <linux/module.h>
46870 #include <linux/seq_file.h>
46871 #include <linux/slab.h>
46872 +#include <linux/sched.h>
46873
46874 #include <asm/uaccess.h>
46875 #include <asm/page.h>
46876 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
46877 memset(p, 0, sizeof(*p));
46878 mutex_init(&p->lock);
46879 p->op = op;
46880 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46881 + p->exec_id = current->exec_id;
46882 +#endif
46883
46884 /*
46885 * Wrappers around seq_open(e.g. swaps_open) need to be
46886 @@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46887 return 0;
46888 }
46889 if (!m->buf) {
46890 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46891 + m->size = PAGE_SIZE;
46892 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46893 if (!m->buf)
46894 return -ENOMEM;
46895 }
46896 @@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46897 Eoverflow:
46898 m->op->stop(m, p);
46899 kfree(m->buf);
46900 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46901 + m->size <<= 1;
46902 + m->buf = kmalloc(m->size, GFP_KERNEL);
46903 return !m->buf ? -ENOMEM : -EAGAIN;
46904 }
46905
46906 @@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46907 m->version = file->f_version;
46908 /* grab buffer if we didn't have one */
46909 if (!m->buf) {
46910 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46911 + m->size = PAGE_SIZE;
46912 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46913 if (!m->buf)
46914 goto Enomem;
46915 }
46916 @@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46917 goto Fill;
46918 m->op->stop(m, p);
46919 kfree(m->buf);
46920 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46921 + m->size <<= 1;
46922 + m->buf = kmalloc(m->size, GFP_KERNEL);
46923 if (!m->buf)
46924 goto Enomem;
46925 m->count = 0;
46926 @@ -549,7 +557,7 @@ static void single_stop(struct seq_file *p, void *v)
46927 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46928 void *data)
46929 {
46930 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46931 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46932 int res = -ENOMEM;
46933
46934 if (op) {
46935 diff --git a/fs/splice.c b/fs/splice.c
46936 index fa2defa..8601650 100644
46937 --- a/fs/splice.c
46938 +++ b/fs/splice.c
46939 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46940 pipe_lock(pipe);
46941
46942 for (;;) {
46943 - if (!pipe->readers) {
46944 + if (!atomic_read(&pipe->readers)) {
46945 send_sig(SIGPIPE, current, 0);
46946 if (!ret)
46947 ret = -EPIPE;
46948 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46949 do_wakeup = 0;
46950 }
46951
46952 - pipe->waiting_writers++;
46953 + atomic_inc(&pipe->waiting_writers);
46954 pipe_wait(pipe);
46955 - pipe->waiting_writers--;
46956 + atomic_dec(&pipe->waiting_writers);
46957 }
46958
46959 pipe_unlock(pipe);
46960 @@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
46961 old_fs = get_fs();
46962 set_fs(get_ds());
46963 /* The cast to a user pointer is valid due to the set_fs() */
46964 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46965 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46966 set_fs(old_fs);
46967
46968 return res;
46969 @@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
46970 old_fs = get_fs();
46971 set_fs(get_ds());
46972 /* The cast to a user pointer is valid due to the set_fs() */
46973 - res = vfs_write(file, (const char __user *)buf, count, &pos);
46974 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46975 set_fs(old_fs);
46976
46977 return res;
46978 @@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
46979 goto err;
46980
46981 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46982 - vec[i].iov_base = (void __user *) page_address(page);
46983 + vec[i].iov_base = (void __force_user *) page_address(page);
46984 vec[i].iov_len = this_len;
46985 spd.pages[i] = page;
46986 spd.nr_pages++;
46987 @@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46988 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46989 {
46990 while (!pipe->nrbufs) {
46991 - if (!pipe->writers)
46992 + if (!atomic_read(&pipe->writers))
46993 return 0;
46994
46995 - if (!pipe->waiting_writers && sd->num_spliced)
46996 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46997 return 0;
46998
46999 if (sd->flags & SPLICE_F_NONBLOCK)
47000 @@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
47001 * out of the pipe right after the splice_to_pipe(). So set
47002 * PIPE_READERS appropriately.
47003 */
47004 - pipe->readers = 1;
47005 + atomic_set(&pipe->readers, 1);
47006
47007 current->splice_pipe = pipe;
47008 }
47009 @@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
47010 ret = -ERESTARTSYS;
47011 break;
47012 }
47013 - if (!pipe->writers)
47014 + if (!atomic_read(&pipe->writers))
47015 break;
47016 - if (!pipe->waiting_writers) {
47017 + if (!atomic_read(&pipe->waiting_writers)) {
47018 if (flags & SPLICE_F_NONBLOCK) {
47019 ret = -EAGAIN;
47020 break;
47021 @@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
47022 pipe_lock(pipe);
47023
47024 while (pipe->nrbufs >= pipe->buffers) {
47025 - if (!pipe->readers) {
47026 + if (!atomic_read(&pipe->readers)) {
47027 send_sig(SIGPIPE, current, 0);
47028 ret = -EPIPE;
47029 break;
47030 @@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
47031 ret = -ERESTARTSYS;
47032 break;
47033 }
47034 - pipe->waiting_writers++;
47035 + atomic_inc(&pipe->waiting_writers);
47036 pipe_wait(pipe);
47037 - pipe->waiting_writers--;
47038 + atomic_dec(&pipe->waiting_writers);
47039 }
47040
47041 pipe_unlock(pipe);
47042 @@ -1819,14 +1819,14 @@ retry:
47043 pipe_double_lock(ipipe, opipe);
47044
47045 do {
47046 - if (!opipe->readers) {
47047 + if (!atomic_read(&opipe->readers)) {
47048 send_sig(SIGPIPE, current, 0);
47049 if (!ret)
47050 ret = -EPIPE;
47051 break;
47052 }
47053
47054 - if (!ipipe->nrbufs && !ipipe->writers)
47055 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
47056 break;
47057
47058 /*
47059 @@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
47060 pipe_double_lock(ipipe, opipe);
47061
47062 do {
47063 - if (!opipe->readers) {
47064 + if (!atomic_read(&opipe->readers)) {
47065 send_sig(SIGPIPE, current, 0);
47066 if (!ret)
47067 ret = -EPIPE;
47068 @@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
47069 * return EAGAIN if we have the potential of some data in the
47070 * future, otherwise just return 0
47071 */
47072 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
47073 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
47074 ret = -EAGAIN;
47075
47076 pipe_unlock(ipipe);
47077 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
47078 index 7fdf6a7..e6cd8ad 100644
47079 --- a/fs/sysfs/dir.c
47080 +++ b/fs/sysfs/dir.c
47081 @@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
47082 struct sysfs_dirent *sd;
47083 int rc;
47084
47085 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
47086 + const char *parent_name = parent_sd->s_name;
47087 +
47088 + mode = S_IFDIR | S_IRWXU;
47089 +
47090 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
47091 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
47092 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
47093 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
47094 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
47095 +#endif
47096 +
47097 /* allocate */
47098 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
47099 if (!sd)
47100 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
47101 index 779789a..f58193c 100644
47102 --- a/fs/sysfs/file.c
47103 +++ b/fs/sysfs/file.c
47104 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
47105
47106 struct sysfs_open_dirent {
47107 atomic_t refcnt;
47108 - atomic_t event;
47109 + atomic_unchecked_t event;
47110 wait_queue_head_t poll;
47111 struct list_head buffers; /* goes through sysfs_buffer.list */
47112 };
47113 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
47114 if (!sysfs_get_active(attr_sd))
47115 return -ENODEV;
47116
47117 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
47118 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
47119 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
47120
47121 sysfs_put_active(attr_sd);
47122 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
47123 return -ENOMEM;
47124
47125 atomic_set(&new_od->refcnt, 0);
47126 - atomic_set(&new_od->event, 1);
47127 + atomic_set_unchecked(&new_od->event, 1);
47128 init_waitqueue_head(&new_od->poll);
47129 INIT_LIST_HEAD(&new_od->buffers);
47130 goto retry;
47131 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
47132
47133 sysfs_put_active(attr_sd);
47134
47135 - if (buffer->event != atomic_read(&od->event))
47136 + if (buffer->event != atomic_read_unchecked(&od->event))
47137 goto trigger;
47138
47139 return DEFAULT_POLLMASK;
47140 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
47141
47142 od = sd->s_attr.open;
47143 if (od) {
47144 - atomic_inc(&od->event);
47145 + atomic_inc_unchecked(&od->event);
47146 wake_up_interruptible(&od->poll);
47147 }
47148
47149 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
47150 index a7ac78f..02158e1 100644
47151 --- a/fs/sysfs/symlink.c
47152 +++ b/fs/sysfs/symlink.c
47153 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
47154
47155 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47156 {
47157 - char *page = nd_get_link(nd);
47158 + const char *page = nd_get_link(nd);
47159 if (!IS_ERR(page))
47160 free_page((unsigned long)page);
47161 }
47162 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
47163 index c175b4d..8f36a16 100644
47164 --- a/fs/udf/misc.c
47165 +++ b/fs/udf/misc.c
47166 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
47167
47168 u8 udf_tag_checksum(const struct tag *t)
47169 {
47170 - u8 *data = (u8 *)t;
47171 + const u8 *data = (const u8 *)t;
47172 u8 checksum = 0;
47173 int i;
47174 for (i = 0; i < sizeof(struct tag); ++i)
47175 diff --git a/fs/utimes.c b/fs/utimes.c
47176 index ba653f3..06ea4b1 100644
47177 --- a/fs/utimes.c
47178 +++ b/fs/utimes.c
47179 @@ -1,6 +1,7 @@
47180 #include <linux/compiler.h>
47181 #include <linux/file.h>
47182 #include <linux/fs.h>
47183 +#include <linux/security.h>
47184 #include <linux/linkage.h>
47185 #include <linux/mount.h>
47186 #include <linux/namei.h>
47187 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
47188 goto mnt_drop_write_and_out;
47189 }
47190 }
47191 +
47192 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
47193 + error = -EACCES;
47194 + goto mnt_drop_write_and_out;
47195 + }
47196 +
47197 mutex_lock(&inode->i_mutex);
47198 error = notify_change(path->dentry, &newattrs);
47199 mutex_unlock(&inode->i_mutex);
47200 diff --git a/fs/xattr.c b/fs/xattr.c
47201 index 67583de..c5aad14 100644
47202 --- a/fs/xattr.c
47203 +++ b/fs/xattr.c
47204 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
47205 * Extended attribute SET operations
47206 */
47207 static long
47208 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
47209 +setxattr(struct path *path, const char __user *name, const void __user *value,
47210 size_t size, int flags)
47211 {
47212 int error;
47213 @@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
47214 return PTR_ERR(kvalue);
47215 }
47216
47217 - error = vfs_setxattr(d, kname, kvalue, size, flags);
47218 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
47219 + error = -EACCES;
47220 + goto out;
47221 + }
47222 +
47223 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47224 +out:
47225 kfree(kvalue);
47226 return error;
47227 }
47228 @@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
47229 return error;
47230 error = mnt_want_write(path.mnt);
47231 if (!error) {
47232 - error = setxattr(path.dentry, name, value, size, flags);
47233 + error = setxattr(&path, name, value, size, flags);
47234 mnt_drop_write(path.mnt);
47235 }
47236 path_put(&path);
47237 @@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
47238 return error;
47239 error = mnt_want_write(path.mnt);
47240 if (!error) {
47241 - error = setxattr(path.dentry, name, value, size, flags);
47242 + error = setxattr(&path, name, value, size, flags);
47243 mnt_drop_write(path.mnt);
47244 }
47245 path_put(&path);
47246 @@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
47247 const void __user *,value, size_t, size, int, flags)
47248 {
47249 struct file *f;
47250 - struct dentry *dentry;
47251 int error = -EBADF;
47252
47253 f = fget(fd);
47254 if (!f)
47255 return error;
47256 - dentry = f->f_path.dentry;
47257 - audit_inode(NULL, dentry);
47258 + audit_inode(NULL, f->f_path.dentry);
47259 error = mnt_want_write_file(f);
47260 if (!error) {
47261 - error = setxattr(dentry, name, value, size, flags);
47262 + error = setxattr(&f->f_path, name, value, size, flags);
47263 mnt_drop_write(f->f_path.mnt);
47264 }
47265 fput(f);
47266 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
47267 index 8d5a506..7f62712 100644
47268 --- a/fs/xattr_acl.c
47269 +++ b/fs/xattr_acl.c
47270 @@ -17,8 +17,8 @@
47271 struct posix_acl *
47272 posix_acl_from_xattr(const void *value, size_t size)
47273 {
47274 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47275 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47276 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47277 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47278 int count;
47279 struct posix_acl *acl;
47280 struct posix_acl_entry *acl_e;
47281 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
47282 index d0ab788..827999b 100644
47283 --- a/fs/xfs/xfs_bmap.c
47284 +++ b/fs/xfs/xfs_bmap.c
47285 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
47286 int nmap,
47287 int ret_nmap);
47288 #else
47289 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47290 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47291 #endif /* DEBUG */
47292
47293 STATIC int
47294 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
47295 index 79d05e8..e3e5861 100644
47296 --- a/fs/xfs/xfs_dir2_sf.c
47297 +++ b/fs/xfs/xfs_dir2_sf.c
47298 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
47299 }
47300
47301 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
47302 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47303 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47304 + char name[sfep->namelen];
47305 + memcpy(name, sfep->name, sfep->namelen);
47306 + if (filldir(dirent, name, sfep->namelen,
47307 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
47308 + *offset = off & 0x7fffffff;
47309 + return 0;
47310 + }
47311 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47312 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47313 *offset = off & 0x7fffffff;
47314 return 0;
47315 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
47316 index d99a905..9f88202 100644
47317 --- a/fs/xfs/xfs_ioctl.c
47318 +++ b/fs/xfs/xfs_ioctl.c
47319 @@ -128,7 +128,7 @@ xfs_find_handle(
47320 }
47321
47322 error = -EFAULT;
47323 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47324 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47325 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47326 goto out_put;
47327
47328 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
47329 index 23ce927..e274cc1 100644
47330 --- a/fs/xfs/xfs_iops.c
47331 +++ b/fs/xfs/xfs_iops.c
47332 @@ -447,7 +447,7 @@ xfs_vn_put_link(
47333 struct nameidata *nd,
47334 void *p)
47335 {
47336 - char *s = nd_get_link(nd);
47337 + const char *s = nd_get_link(nd);
47338
47339 if (!IS_ERR(s))
47340 kfree(s);
47341 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
47342 new file mode 100644
47343 index 0000000..41df561
47344 --- /dev/null
47345 +++ b/grsecurity/Kconfig
47346 @@ -0,0 +1,1075 @@
47347 +#
47348 +# grecurity configuration
47349 +#
47350 +
47351 +menu "Grsecurity"
47352 +
47353 +config GRKERNSEC
47354 + bool "Grsecurity"
47355 + select CRYPTO
47356 + select CRYPTO_SHA256
47357 + help
47358 + If you say Y here, you will be able to configure many features
47359 + that will enhance the security of your system. It is highly
47360 + recommended that you say Y here and read through the help
47361 + for each option so that you fully understand the features and
47362 + can evaluate their usefulness for your machine.
47363 +
47364 +choice
47365 + prompt "Security Level"
47366 + depends on GRKERNSEC
47367 + default GRKERNSEC_CUSTOM
47368 +
47369 +config GRKERNSEC_LOW
47370 + bool "Low"
47371 + select GRKERNSEC_LINK
47372 + select GRKERNSEC_FIFO
47373 + select GRKERNSEC_RANDNET
47374 + select GRKERNSEC_DMESG
47375 + select GRKERNSEC_CHROOT
47376 + select GRKERNSEC_CHROOT_CHDIR
47377 +
47378 + help
47379 + If you choose this option, several of the grsecurity options will
47380 + be enabled that will give you greater protection against a number
47381 + of attacks, while assuring that none of your software will have any
47382 + conflicts with the additional security measures. If you run a lot
47383 + of unusual software, or you are having problems with the higher
47384 + security levels, you should say Y here. With this option, the
47385 + following features are enabled:
47386 +
47387 + - Linking restrictions
47388 + - FIFO restrictions
47389 + - Restricted dmesg
47390 + - Enforced chdir("/") on chroot
47391 + - Runtime module disabling
47392 +
47393 +config GRKERNSEC_MEDIUM
47394 + bool "Medium"
47395 + select PAX
47396 + select PAX_EI_PAX
47397 + select PAX_PT_PAX_FLAGS
47398 + select PAX_HAVE_ACL_FLAGS
47399 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47400 + select GRKERNSEC_CHROOT
47401 + select GRKERNSEC_CHROOT_SYSCTL
47402 + select GRKERNSEC_LINK
47403 + select GRKERNSEC_FIFO
47404 + select GRKERNSEC_DMESG
47405 + select GRKERNSEC_RANDNET
47406 + select GRKERNSEC_FORKFAIL
47407 + select GRKERNSEC_TIME
47408 + select GRKERNSEC_SIGNAL
47409 + select GRKERNSEC_CHROOT
47410 + select GRKERNSEC_CHROOT_UNIX
47411 + select GRKERNSEC_CHROOT_MOUNT
47412 + select GRKERNSEC_CHROOT_PIVOT
47413 + select GRKERNSEC_CHROOT_DOUBLE
47414 + select GRKERNSEC_CHROOT_CHDIR
47415 + select GRKERNSEC_CHROOT_MKNOD
47416 + select GRKERNSEC_PROC
47417 + select GRKERNSEC_PROC_USERGROUP
47418 + select PAX_RANDUSTACK
47419 + select PAX_ASLR
47420 + select PAX_RANDMMAP
47421 + select PAX_REFCOUNT if (X86 || SPARC64)
47422 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
47423 +
47424 + help
47425 + If you say Y here, several features in addition to those included
47426 + in the low additional security level will be enabled. These
47427 + features provide even more security to your system, though in rare
47428 + cases they may be incompatible with very old or poorly written
47429 + software. If you enable this option, make sure that your auth
47430 + service (identd) is running as gid 1001. With this option,
47431 + the following features (in addition to those provided in the
47432 + low additional security level) will be enabled:
47433 +
47434 + - Failed fork logging
47435 + - Time change logging
47436 + - Signal logging
47437 + - Deny mounts in chroot
47438 + - Deny double chrooting
47439 + - Deny sysctl writes in chroot
47440 + - Deny mknod in chroot
47441 + - Deny access to abstract AF_UNIX sockets out of chroot
47442 + - Deny pivot_root in chroot
47443 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
47444 + - /proc restrictions with special GID set to 10 (usually wheel)
47445 + - Address Space Layout Randomization (ASLR)
47446 + - Prevent exploitation of most refcount overflows
47447 + - Bounds checking of copying between the kernel and userland
47448 +
47449 +config GRKERNSEC_HIGH
47450 + bool "High"
47451 + select GRKERNSEC_LINK
47452 + select GRKERNSEC_FIFO
47453 + select GRKERNSEC_DMESG
47454 + select GRKERNSEC_FORKFAIL
47455 + select GRKERNSEC_TIME
47456 + select GRKERNSEC_SIGNAL
47457 + select GRKERNSEC_CHROOT
47458 + select GRKERNSEC_CHROOT_SHMAT
47459 + select GRKERNSEC_CHROOT_UNIX
47460 + select GRKERNSEC_CHROOT_MOUNT
47461 + select GRKERNSEC_CHROOT_FCHDIR
47462 + select GRKERNSEC_CHROOT_PIVOT
47463 + select GRKERNSEC_CHROOT_DOUBLE
47464 + select GRKERNSEC_CHROOT_CHDIR
47465 + select GRKERNSEC_CHROOT_MKNOD
47466 + select GRKERNSEC_CHROOT_CAPS
47467 + select GRKERNSEC_CHROOT_SYSCTL
47468 + select GRKERNSEC_CHROOT_FINDTASK
47469 + select GRKERNSEC_SYSFS_RESTRICT
47470 + select GRKERNSEC_PROC
47471 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47472 + select GRKERNSEC_HIDESYM
47473 + select GRKERNSEC_BRUTE
47474 + select GRKERNSEC_PROC_USERGROUP
47475 + select GRKERNSEC_KMEM
47476 + select GRKERNSEC_RESLOG
47477 + select GRKERNSEC_RANDNET
47478 + select GRKERNSEC_PROC_ADD
47479 + select GRKERNSEC_CHROOT_CHMOD
47480 + select GRKERNSEC_CHROOT_NICE
47481 + select GRKERNSEC_SETXID
47482 + select GRKERNSEC_AUDIT_MOUNT
47483 + select GRKERNSEC_MODHARDEN if (MODULES)
47484 + select GRKERNSEC_HARDEN_PTRACE
47485 + select GRKERNSEC_PTRACE_READEXEC
47486 + select GRKERNSEC_VM86 if (X86_32)
47487 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
47488 + select PAX
47489 + select PAX_RANDUSTACK
47490 + select PAX_ASLR
47491 + select PAX_RANDMMAP
47492 + select PAX_NOEXEC
47493 + select PAX_MPROTECT
47494 + select PAX_EI_PAX
47495 + select PAX_PT_PAX_FLAGS
47496 + select PAX_HAVE_ACL_FLAGS
47497 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
47498 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
47499 + select PAX_RANDKSTACK if (X86_TSC && X86)
47500 + select PAX_SEGMEXEC if (X86_32)
47501 + select PAX_PAGEEXEC
47502 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
47503 + select PAX_EMUTRAMP if (PARISC)
47504 + select PAX_EMUSIGRT if (PARISC)
47505 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
47506 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
47507 + select PAX_REFCOUNT if (X86 || SPARC64)
47508 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
47509 + help
47510 + If you say Y here, many of the features of grsecurity will be
47511 + enabled, which will protect you against many kinds of attacks
47512 + against your system. The heightened security comes at a cost
47513 + of an increased chance of incompatibilities with rare software
47514 + on your machine. Since this security level enables PaX, you should
47515 + view <http://pax.grsecurity.net> and read about the PaX
47516 + project. While you are there, download chpax and run it on
47517 + binaries that cause problems with PaX. Also remember that
47518 + since the /proc restrictions are enabled, you must run your
47519 + identd as gid 1001. This security level enables the following
47520 + features in addition to those listed in the low and medium
47521 + security levels:
47522 +
47523 + - Additional /proc restrictions
47524 + - Chmod restrictions in chroot
47525 + - No signals, ptrace, or viewing of processes outside of chroot
47526 + - Capability restrictions in chroot
47527 + - Deny fchdir out of chroot
47528 + - Priority restrictions in chroot
47529 + - Segmentation-based implementation of PaX
47530 + - Mprotect restrictions
47531 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
47532 + - Kernel stack randomization
47533 + - Mount/unmount/remount logging
47534 + - Kernel symbol hiding
47535 + - Hardening of module auto-loading
47536 + - Ptrace restrictions
47537 + - Restricted vm86 mode
47538 + - Restricted sysfs/debugfs
47539 + - Active kernel exploit response
47540 +
47541 +config GRKERNSEC_CUSTOM
47542 + bool "Custom"
47543 + help
47544 + If you say Y here, you will be able to configure every grsecurity
47545 + option, which allows you to enable many more features that aren't
47546 + covered in the basic security levels. These additional features
47547 + include TPE, socket restrictions, and the sysctl system for
47548 + grsecurity. It is advised that you read through the help for
47549 + each option to determine its usefulness in your situation.
47550 +
47551 +endchoice
47552 +
47553 +menu "Memory Protections"
47554 +depends on GRKERNSEC
47555 +
47556 +config GRKERNSEC_KMEM
47557 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
47558 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
47559 + help
47560 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
47561 + be written to or read from to modify or leak the contents of the running
47562 + kernel. /dev/port will also not be allowed to be opened. If you have module
47563 + support disabled, enabling this will close up four ways that are
47564 + currently used to insert malicious code into the running kernel.
47565 + Even with all these features enabled, we still highly recommend that
47566 + you use the RBAC system, as it is still possible for an attacker to
47567 + modify the running kernel through privileged I/O granted by ioperm/iopl.
47568 + If you are not using XFree86, you may be able to stop this additional
47569 + case by enabling the 'Disable privileged I/O' option. Though nothing
47570 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
47571 + but only to video memory, which is the only writing we allow in this
47572 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
47573 + not be allowed to mprotect it with PROT_WRITE later.
47574 + It is highly recommended that you say Y here if you meet all the
47575 + conditions above.
47576 +
47577 +config GRKERNSEC_VM86
47578 + bool "Restrict VM86 mode"
47579 + depends on X86_32
47580 +
47581 + help
47582 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
47583 + make use of a special execution mode on 32bit x86 processors called
47584 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
47585 + video cards and will still work with this option enabled. The purpose
47586 + of the option is to prevent exploitation of emulation errors in
47587 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
47588 + Nearly all users should be able to enable this option.
47589 +
47590 +config GRKERNSEC_IO
47591 + bool "Disable privileged I/O"
47592 + depends on X86
47593 + select RTC_CLASS
47594 + select RTC_INTF_DEV
47595 + select RTC_DRV_CMOS
47596 +
47597 + help
47598 + If you say Y here, all ioperm and iopl calls will return an error.
47599 + Ioperm and iopl can be used to modify the running kernel.
47600 + Unfortunately, some programs need this access to operate properly,
47601 + the most notable of which are XFree86 and hwclock. hwclock can be
47602 + remedied by having RTC support in the kernel, so real-time
47603 + clock support is enabled if this option is enabled, to ensure
47604 + that hwclock operates correctly. XFree86 still will not
47605 + operate correctly with this option enabled, so DO NOT CHOOSE Y
47606 + IF YOU USE XFree86. If you use XFree86 and you still want to
47607 + protect your kernel against modification, use the RBAC system.
47608 +
47609 +config GRKERNSEC_PROC_MEMMAP
47610 + bool "Harden ASLR against information leaks and entropy reduction"
47611 + default y if (PAX_NOEXEC || PAX_ASLR)
47612 + depends on PAX_NOEXEC || PAX_ASLR
47613 + help
47614 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
47615 + give no information about the addresses of its mappings if
47616 + PaX features that rely on random addresses are enabled on the task.
47617 + In addition to sanitizing this information and disabling other
47618 + dangerous sources of information, this option causes reads of sensitive
47619 + /proc/<pid> entries where the file descriptor was opened in a different
47620 + task than the one performing the read. Such attempts are logged.
47621 + Finally, this option limits argv/env strings for suid/sgid binaries
47622 + to 1MB to prevent a complete exhaustion of the stack entropy provided
47623 + by ASLR.
47624 + If you use PaX it is essential that you say Y here as it closes up
47625 + several holes that make full ASLR useless for suid/sgid binaries.
47626 +
47627 +config GRKERNSEC_BRUTE
47628 + bool "Deter exploit bruteforcing"
47629 + help
47630 + If you say Y here, attempts to bruteforce exploits against forking
47631 + daemons such as apache or sshd, as well as against suid/sgid binaries
47632 + will be deterred. When a child of a forking daemon is killed by PaX
47633 + or crashes due to an illegal instruction or other suspicious signal,
47634 + the parent process will be delayed 30 seconds upon every subsequent
47635 + fork until the administrator is able to assess the situation and
47636 + restart the daemon.
47637 + In the suid/sgid case, the attempt is logged, the user has all their
47638 + processes terminated, and they are prevented from executing any further
47639 + processes for 15 minutes.
47640 + It is recommended that you also enable signal logging in the auditing
47641 + section so that logs are generated when a process triggers a suspicious
47642 + signal.
47643 + If the sysctl option is enabled, a sysctl option with name
47644 + "deter_bruteforce" is created.
47645 +
47646 +
47647 +config GRKERNSEC_MODHARDEN
47648 + bool "Harden module auto-loading"
47649 + depends on MODULES
47650 + help
47651 + If you say Y here, module auto-loading in response to use of some
47652 + feature implemented by an unloaded module will be restricted to
47653 + root users. Enabling this option helps defend against attacks
47654 + by unprivileged users who abuse the auto-loading behavior to
47655 + cause a vulnerable module to load that is then exploited.
47656 +
47657 + If this option prevents a legitimate use of auto-loading for a
47658 + non-root user, the administrator can execute modprobe manually
47659 + with the exact name of the module mentioned in the alert log.
47660 + Alternatively, the administrator can add the module to the list
47661 + of modules loaded at boot by modifying init scripts.
47662 +
47663 + Modification of init scripts will most likely be needed on
47664 + Ubuntu servers with encrypted home directory support enabled,
47665 + as the first non-root user logging in will cause the ecb(aes),
47666 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
47667 +
47668 +config GRKERNSEC_HIDESYM
47669 + bool "Hide kernel symbols"
47670 + help
47671 + If you say Y here, getting information on loaded modules, and
47672 + displaying all kernel symbols through a syscall will be restricted
47673 + to users with CAP_SYS_MODULE. For software compatibility reasons,
47674 + /proc/kallsyms will be restricted to the root user. The RBAC
47675 + system can hide that entry even from root.
47676 +
47677 + This option also prevents leaking of kernel addresses through
47678 + several /proc entries.
47679 +
47680 + Note that this option is only effective provided the following
47681 + conditions are met:
47682 + 1) The kernel using grsecurity is not precompiled by some distribution
47683 + 2) You have also enabled GRKERNSEC_DMESG
47684 + 3) You are using the RBAC system and hiding other files such as your
47685 + kernel image and System.map. Alternatively, enabling this option
47686 + causes the permissions on /boot, /lib/modules, and the kernel
47687 + source directory to change at compile time to prevent
47688 + reading by non-root users.
47689 + If the above conditions are met, this option will aid in providing a
47690 + useful protection against local kernel exploitation of overflows
47691 + and arbitrary read/write vulnerabilities.
47692 +
47693 +config GRKERNSEC_KERN_LOCKOUT
47694 + bool "Active kernel exploit response"
47695 + depends on X86 || ARM || PPC || SPARC
47696 + help
47697 + If you say Y here, when a PaX alert is triggered due to suspicious
47698 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
47699 + or an OOPs occurs due to bad memory accesses, instead of just
47700 + terminating the offending process (and potentially allowing
47701 + a subsequent exploit from the same user), we will take one of two
47702 + actions:
47703 + If the user was root, we will panic the system
47704 + If the user was non-root, we will log the attempt, terminate
47705 + all processes owned by the user, then prevent them from creating
47706 + any new processes until the system is restarted
47707 + This deters repeated kernel exploitation/bruteforcing attempts
47708 + and is useful for later forensics.
47709 +
47710 +endmenu
47711 +menu "Role Based Access Control Options"
47712 +depends on GRKERNSEC
47713 +
47714 +config GRKERNSEC_RBAC_DEBUG
47715 + bool
47716 +
47717 +config GRKERNSEC_NO_RBAC
47718 + bool "Disable RBAC system"
47719 + help
47720 + If you say Y here, the /dev/grsec device will be removed from the kernel,
47721 + preventing the RBAC system from being enabled. You should only say Y
47722 + here if you have no intention of using the RBAC system, so as to prevent
47723 + an attacker with root access from misusing the RBAC system to hide files
47724 + and processes when loadable module support and /dev/[k]mem have been
47725 + locked down.
47726 +
47727 +config GRKERNSEC_ACL_HIDEKERN
47728 + bool "Hide kernel processes"
47729 + help
47730 + If you say Y here, all kernel threads will be hidden to all
47731 + processes but those whose subject has the "view hidden processes"
47732 + flag.
47733 +
47734 +config GRKERNSEC_ACL_MAXTRIES
47735 + int "Maximum tries before password lockout"
47736 + default 3
47737 + help
47738 + This option enforces the maximum number of times a user can attempt
47739 + to authorize themselves with the grsecurity RBAC system before being
47740 + denied the ability to attempt authorization again for a specified time.
47741 + The lower the number, the harder it will be to brute-force a password.
47742 +
47743 +config GRKERNSEC_ACL_TIMEOUT
47744 + int "Time to wait after max password tries, in seconds"
47745 + default 30
47746 + help
47747 + This option specifies the time the user must wait after attempting to
47748 + authorize to the RBAC system with the maximum number of invalid
47749 + passwords. The higher the number, the harder it will be to brute-force
47750 + a password.
47751 +
47752 +endmenu
47753 +menu "Filesystem Protections"
47754 +depends on GRKERNSEC
47755 +
47756 +config GRKERNSEC_PROC
47757 + bool "Proc restrictions"
47758 + help
47759 + If you say Y here, the permissions of the /proc filesystem
47760 + will be altered to enhance system security and privacy. You MUST
47761 + choose either a user only restriction or a user and group restriction.
47762 + Depending upon the option you choose, you can either restrict users to
47763 + see only the processes they themselves run, or choose a group that can
47764 + view all processes and files normally restricted to root if you choose
47765 + the "restrict to user only" option. NOTE: If you're running identd as
47766 + a non-root user, you will have to run it as the group you specify here.
47767 +
47768 +config GRKERNSEC_PROC_USER
47769 + bool "Restrict /proc to user only"
47770 + depends on GRKERNSEC_PROC
47771 + help
47772 + If you say Y here, non-root users will only be able to view their own
47773 + processes, and restricts them from viewing network-related information,
47774 + and viewing kernel symbol and module information.
47775 +
47776 +config GRKERNSEC_PROC_USERGROUP
47777 + bool "Allow special group"
47778 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
47779 + help
47780 + If you say Y here, you will be able to select a group that will be
47781 + able to view all processes and network-related information. If you've
47782 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
47783 + remain hidden. This option is useful if you want to run identd as
47784 + a non-root user.
47785 +
47786 +config GRKERNSEC_PROC_GID
47787 + int "GID for special group"
47788 + depends on GRKERNSEC_PROC_USERGROUP
47789 + default 1001
47790 +
47791 +config GRKERNSEC_PROC_ADD
47792 + bool "Additional restrictions"
47793 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
47794 + help
47795 + If you say Y here, additional restrictions will be placed on
47796 + /proc that keep normal users from viewing device information and
47797 + slabinfo information that could be useful for exploits.
47798 +
47799 +config GRKERNSEC_LINK
47800 + bool "Linking restrictions"
47801 + help
47802 + If you say Y here, /tmp race exploits will be prevented, since users
47803 + will no longer be able to follow symlinks owned by other users in
47804 + world-writable +t directories (e.g. /tmp), unless the owner of the
47805 + symlink is the owner of the directory. users will also not be
47806 + able to hardlink to files they do not own. If the sysctl option is
47807 + enabled, a sysctl option with name "linking_restrictions" is created.
47808 +
47809 +config GRKERNSEC_FIFO
47810 + bool "FIFO restrictions"
47811 + help
47812 + If you say Y here, users will not be able to write to FIFOs they don't
47813 + own in world-writable +t directories (e.g. /tmp), unless the owner of
47814 + the FIFO is the same owner of the directory it's held in. If the sysctl
47815 + option is enabled, a sysctl option with name "fifo_restrictions" is
47816 + created.
47817 +
47818 +config GRKERNSEC_SYSFS_RESTRICT
47819 + bool "Sysfs/debugfs restriction"
47820 + depends on SYSFS
47821 + help
47822 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
47823 + any filesystem normally mounted under it (e.g. debugfs) will be
47824 + mostly accessible only by root. These filesystems generally provide access
47825 + to hardware and debug information that isn't appropriate for unprivileged
47826 + users of the system. Sysfs and debugfs have also become a large source
47827 + of new vulnerabilities, ranging from infoleaks to local compromise.
47828 + There has been very little oversight with an eye toward security involved
47829 + in adding new exporters of information to these filesystems, so their
47830 + use is discouraged.
47831 + For reasons of compatibility, a few directories have been whitelisted
47832 + for access by non-root users:
47833 + /sys/fs/selinux
47834 + /sys/fs/fuse
47835 + /sys/devices/system/cpu
47836 +
47837 +config GRKERNSEC_ROFS
47838 + bool "Runtime read-only mount protection"
47839 + help
47840 + If you say Y here, a sysctl option with name "romount_protect" will
47841 + be created. By setting this option to 1 at runtime, filesystems
47842 + will be protected in the following ways:
47843 + * No new writable mounts will be allowed
47844 + * Existing read-only mounts won't be able to be remounted read/write
47845 + * Write operations will be denied on all block devices
47846 + This option acts independently of grsec_lock: once it is set to 1,
47847 + it cannot be turned off. Therefore, please be mindful of the resulting
47848 + behavior if this option is enabled in an init script on a read-only
47849 + filesystem. This feature is mainly intended for secure embedded systems.
47850 +
47851 +config GRKERNSEC_CHROOT
47852 + bool "Chroot jail restrictions"
47853 + help
47854 + If you say Y here, you will be able to choose several options that will
47855 + make breaking out of a chrooted jail much more difficult. If you
47856 + encounter no software incompatibilities with the following options, it
47857 + is recommended that you enable each one.
47858 +
47859 +config GRKERNSEC_CHROOT_MOUNT
47860 + bool "Deny mounts"
47861 + depends on GRKERNSEC_CHROOT
47862 + help
47863 + If you say Y here, processes inside a chroot will not be able to
47864 + mount or remount filesystems. If the sysctl option is enabled, a
47865 + sysctl option with name "chroot_deny_mount" is created.
47866 +
47867 +config GRKERNSEC_CHROOT_DOUBLE
47868 + bool "Deny double-chroots"
47869 + depends on GRKERNSEC_CHROOT
47870 + help
47871 + If you say Y here, processes inside a chroot will not be able to chroot
47872 + again outside the chroot. This is a widely used method of breaking
47873 + out of a chroot jail and should not be allowed. If the sysctl
47874 + option is enabled, a sysctl option with name
47875 + "chroot_deny_chroot" is created.
47876 +
47877 +config GRKERNSEC_CHROOT_PIVOT
47878 + bool "Deny pivot_root in chroot"
47879 + depends on GRKERNSEC_CHROOT
47880 + help
47881 + If you say Y here, processes inside a chroot will not be able to use
47882 + a function called pivot_root() that was introduced in Linux 2.3.41. It
47883 + works similar to chroot in that it changes the root filesystem. This
47884 + function could be misused in a chrooted process to attempt to break out
47885 + of the chroot, and therefore should not be allowed. If the sysctl
47886 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
47887 + created.
47888 +
47889 +config GRKERNSEC_CHROOT_CHDIR
47890 + bool "Enforce chdir(\"/\") on all chroots"
47891 + depends on GRKERNSEC_CHROOT
47892 + help
47893 + If you say Y here, the current working directory of all newly-chrooted
47894 + applications will be set to the the root directory of the chroot.
47895 + The man page on chroot(2) states:
47896 + Note that this call does not change the current working
47897 + directory, so that `.' can be outside the tree rooted at
47898 + `/'. In particular, the super-user can escape from a
47899 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
47900 +
47901 + It is recommended that you say Y here, since it's not known to break
47902 + any software. If the sysctl option is enabled, a sysctl option with
47903 + name "chroot_enforce_chdir" is created.
47904 +
47905 +config GRKERNSEC_CHROOT_CHMOD
47906 + bool "Deny (f)chmod +s"
47907 + depends on GRKERNSEC_CHROOT
47908 + help
47909 + If you say Y here, processes inside a chroot will not be able to chmod
47910 + or fchmod files to make them have suid or sgid bits. This protects
47911 + against another published method of breaking a chroot. If the sysctl
47912 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
47913 + created.
47914 +
47915 +config GRKERNSEC_CHROOT_FCHDIR
47916 + bool "Deny fchdir out of chroot"
47917 + depends on GRKERNSEC_CHROOT
47918 + help
47919 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
47920 + to a file descriptor of the chrooting process that points to a directory
47921 + outside the filesystem will be stopped. If the sysctl option
47922 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
47923 +
47924 +config GRKERNSEC_CHROOT_MKNOD
47925 + bool "Deny mknod"
47926 + depends on GRKERNSEC_CHROOT
47927 + help
47928 + If you say Y here, processes inside a chroot will not be allowed to
47929 + mknod. The problem with using mknod inside a chroot is that it
47930 + would allow an attacker to create a device entry that is the same
47931 + as one on the physical root of your system, which could range from
47932 + anything from the console device to a device for your harddrive (which
47933 + they could then use to wipe the drive or steal data). It is recommended
47934 + that you say Y here, unless you run into software incompatibilities.
47935 + If the sysctl option is enabled, a sysctl option with name
47936 + "chroot_deny_mknod" is created.
47937 +
47938 +config GRKERNSEC_CHROOT_SHMAT
47939 + bool "Deny shmat() out of chroot"
47940 + depends on GRKERNSEC_CHROOT
47941 + help
47942 + If you say Y here, processes inside a chroot will not be able to attach
47943 + to shared memory segments that were created outside of the chroot jail.
47944 + It is recommended that you say Y here. If the sysctl option is enabled,
47945 + a sysctl option with name "chroot_deny_shmat" is created.
47946 +
47947 +config GRKERNSEC_CHROOT_UNIX
47948 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
47949 + depends on GRKERNSEC_CHROOT
47950 + help
47951 + If you say Y here, processes inside a chroot will not be able to
47952 + connect to abstract (meaning not belonging to a filesystem) Unix
47953 + domain sockets that were bound outside of a chroot. It is recommended
47954 + that you say Y here. If the sysctl option is enabled, a sysctl option
47955 + with name "chroot_deny_unix" is created.
47956 +
47957 +config GRKERNSEC_CHROOT_FINDTASK
47958 + bool "Protect outside processes"
47959 + depends on GRKERNSEC_CHROOT
47960 + help
47961 + If you say Y here, processes inside a chroot will not be able to
47962 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
47963 + getsid, or view any process outside of the chroot. If the sysctl
47964 + option is enabled, a sysctl option with name "chroot_findtask" is
47965 + created.
47966 +
47967 +config GRKERNSEC_CHROOT_NICE
47968 + bool "Restrict priority changes"
47969 + depends on GRKERNSEC_CHROOT
47970 + help
47971 + If you say Y here, processes inside a chroot will not be able to raise
47972 + the priority of processes in the chroot, or alter the priority of
47973 + processes outside the chroot. This provides more security than simply
47974 + removing CAP_SYS_NICE from the process' capability set. If the
47975 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
47976 + is created.
47977 +
47978 +config GRKERNSEC_CHROOT_SYSCTL
47979 + bool "Deny sysctl writes"
47980 + depends on GRKERNSEC_CHROOT
47981 + help
47982 + If you say Y here, an attacker in a chroot will not be able to
47983 + write to sysctl entries, either by sysctl(2) or through a /proc
47984 + interface. It is strongly recommended that you say Y here. If the
47985 + sysctl option is enabled, a sysctl option with name
47986 + "chroot_deny_sysctl" is created.
47987 +
47988 +config GRKERNSEC_CHROOT_CAPS
47989 + bool "Capability restrictions"
47990 + depends on GRKERNSEC_CHROOT
47991 + help
47992 + If you say Y here, the capabilities on all processes within a
47993 + chroot jail will be lowered to stop module insertion, raw i/o,
47994 + system and net admin tasks, rebooting the system, modifying immutable
47995 + files, modifying IPC owned by another, and changing the system time.
47996 + This is left an option because it can break some apps. Disable this
47997 + if your chrooted apps are having problems performing those kinds of
47998 + tasks. If the sysctl option is enabled, a sysctl option with
47999 + name "chroot_caps" is created.
48000 +
48001 +endmenu
48002 +menu "Kernel Auditing"
48003 +depends on GRKERNSEC
48004 +
48005 +config GRKERNSEC_AUDIT_GROUP
48006 + bool "Single group for auditing"
48007 + help
48008 + If you say Y here, the exec, chdir, and (un)mount logging features
48009 + will only operate on a group you specify. This option is recommended
48010 + if you only want to watch certain users instead of having a large
48011 + amount of logs from the entire system. If the sysctl option is enabled,
48012 + a sysctl option with name "audit_group" is created.
48013 +
48014 +config GRKERNSEC_AUDIT_GID
48015 + int "GID for auditing"
48016 + depends on GRKERNSEC_AUDIT_GROUP
48017 + default 1007
48018 +
48019 +config GRKERNSEC_EXECLOG
48020 + bool "Exec logging"
48021 + help
48022 + If you say Y here, all execve() calls will be logged (since the
48023 + other exec*() calls are frontends to execve(), all execution
48024 + will be logged). Useful for shell-servers that like to keep track
48025 + of their users. If the sysctl option is enabled, a sysctl option with
48026 + name "exec_logging" is created.
48027 + WARNING: This option when enabled will produce a LOT of logs, especially
48028 + on an active system.
48029 +
48030 +config GRKERNSEC_RESLOG
48031 + bool "Resource logging"
48032 + help
48033 + If you say Y here, all attempts to overstep resource limits will
48034 + be logged with the resource name, the requested size, and the current
48035 + limit. It is highly recommended that you say Y here. If the sysctl
48036 + option is enabled, a sysctl option with name "resource_logging" is
48037 + created. If the RBAC system is enabled, the sysctl value is ignored.
48038 +
48039 +config GRKERNSEC_CHROOT_EXECLOG
48040 + bool "Log execs within chroot"
48041 + help
48042 + If you say Y here, all executions inside a chroot jail will be logged
48043 + to syslog. This can cause a large amount of logs if certain
48044 + applications (eg. djb's daemontools) are installed on the system, and
48045 + is therefore left as an option. If the sysctl option is enabled, a
48046 + sysctl option with name "chroot_execlog" is created.
48047 +
48048 +config GRKERNSEC_AUDIT_PTRACE
48049 + bool "Ptrace logging"
48050 + help
48051 + If you say Y here, all attempts to attach to a process via ptrace
48052 + will be logged. If the sysctl option is enabled, a sysctl option
48053 + with name "audit_ptrace" is created.
48054 +
48055 +config GRKERNSEC_AUDIT_CHDIR
48056 + bool "Chdir logging"
48057 + help
48058 + If you say Y here, all chdir() calls will be logged. If the sysctl
48059 + option is enabled, a sysctl option with name "audit_chdir" is created.
48060 +
48061 +config GRKERNSEC_AUDIT_MOUNT
48062 + bool "(Un)Mount logging"
48063 + help
48064 + If you say Y here, all mounts and unmounts will be logged. If the
48065 + sysctl option is enabled, a sysctl option with name "audit_mount" is
48066 + created.
48067 +
48068 +config GRKERNSEC_SIGNAL
48069 + bool "Signal logging"
48070 + help
48071 + If you say Y here, certain important signals will be logged, such as
48072 + SIGSEGV, which will as a result inform you of when a error in a program
48073 + occurred, which in some cases could mean a possible exploit attempt.
48074 + If the sysctl option is enabled, a sysctl option with name
48075 + "signal_logging" is created.
48076 +
48077 +config GRKERNSEC_FORKFAIL
48078 + bool "Fork failure logging"
48079 + help
48080 + If you say Y here, all failed fork() attempts will be logged.
48081 + This could suggest a fork bomb, or someone attempting to overstep
48082 + their process limit. If the sysctl option is enabled, a sysctl option
48083 + with name "forkfail_logging" is created.
48084 +
48085 +config GRKERNSEC_TIME
48086 + bool "Time change logging"
48087 + help
48088 + If you say Y here, any changes of the system clock will be logged.
48089 + If the sysctl option is enabled, a sysctl option with name
48090 + "timechange_logging" is created.
48091 +
48092 +config GRKERNSEC_PROC_IPADDR
48093 + bool "/proc/<pid>/ipaddr support"
48094 + help
48095 + If you say Y here, a new entry will be added to each /proc/<pid>
48096 + directory that contains the IP address of the person using the task.
48097 + The IP is carried across local TCP and AF_UNIX stream sockets.
48098 + This information can be useful for IDS/IPSes to perform remote response
48099 + to a local attack. The entry is readable by only the owner of the
48100 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
48101 + the RBAC system), and thus does not create privacy concerns.
48102 +
48103 +config GRKERNSEC_RWXMAP_LOG
48104 + bool 'Denied RWX mmap/mprotect logging'
48105 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
48106 + help
48107 + If you say Y here, calls to mmap() and mprotect() with explicit
48108 + usage of PROT_WRITE and PROT_EXEC together will be logged when
48109 + denied by the PAX_MPROTECT feature. If the sysctl option is
48110 + enabled, a sysctl option with name "rwxmap_logging" is created.
48111 +
48112 +config GRKERNSEC_AUDIT_TEXTREL
48113 + bool 'ELF text relocations logging (READ HELP)'
48114 + depends on PAX_MPROTECT
48115 + help
48116 + If you say Y here, text relocations will be logged with the filename
48117 + of the offending library or binary. The purpose of the feature is
48118 + to help Linux distribution developers get rid of libraries and
48119 + binaries that need text relocations which hinder the future progress
48120 + of PaX. Only Linux distribution developers should say Y here, and
48121 + never on a production machine, as this option creates an information
48122 + leak that could aid an attacker in defeating the randomization of
48123 + a single memory region. If the sysctl option is enabled, a sysctl
48124 + option with name "audit_textrel" is created.
48125 +
48126 +endmenu
48127 +
48128 +menu "Executable Protections"
48129 +depends on GRKERNSEC
48130 +
48131 +config GRKERNSEC_DMESG
48132 + bool "Dmesg(8) restriction"
48133 + help
48134 + If you say Y here, non-root users will not be able to use dmesg(8)
48135 + to view up to the last 4kb of messages in the kernel's log buffer.
48136 + The kernel's log buffer often contains kernel addresses and other
48137 + identifying information useful to an attacker in fingerprinting a
48138 + system for a targeted exploit.
48139 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
48140 + created.
48141 +
48142 +config GRKERNSEC_HARDEN_PTRACE
48143 + bool "Deter ptrace-based process snooping"
48144 + help
48145 + If you say Y here, TTY sniffers and other malicious monitoring
48146 + programs implemented through ptrace will be defeated. If you
48147 + have been using the RBAC system, this option has already been
48148 + enabled for several years for all users, with the ability to make
48149 + fine-grained exceptions.
48150 +
48151 + This option only affects the ability of non-root users to ptrace
48152 + processes that are not a descendant of the ptracing process.
48153 + This means that strace ./binary and gdb ./binary will still work,
48154 + but attaching to arbitrary processes will not. If the sysctl
48155 + option is enabled, a sysctl option with name "harden_ptrace" is
48156 + created.
48157 +
48158 +config GRKERNSEC_PTRACE_READEXEC
48159 + bool "Require read access to ptrace sensitive binaries"
48160 + help
48161 + If you say Y here, unprivileged users will not be able to ptrace unreadable
48162 + binaries. This option is useful in environments that
48163 + remove the read bits (e.g. file mode 4711) from suid binaries to
48164 + prevent infoleaking of their contents. This option adds
48165 + consistency to the use of that file mode, as the binary could normally
48166 + be read out when run without privileges while ptracing.
48167 +
48168 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
48169 + is created.
48170 +
48171 +config GRKERNSEC_SETXID
48172 + bool "Enforce consistent multithreaded privileges"
48173 + help
48174 + If you say Y here, a change from a root uid to a non-root uid
48175 + in a multithreaded application will cause the resulting uids,
48176 + gids, supplementary groups, and capabilities in that thread
48177 + to be propagated to the other threads of the process. In most
48178 + cases this is unnecessary, as glibc will emulate this behavior
48179 + on behalf of the application. Other libcs do not act in the
48180 + same way, allowing the other threads of the process to continue
48181 + running with root privileges. If the sysctl option is enabled,
48182 + a sysctl option with name "consistent_setxid" is created.
48183 +
48184 +config GRKERNSEC_TPE
48185 + bool "Trusted Path Execution (TPE)"
48186 + help
48187 + If you say Y here, you will be able to choose a gid to add to the
48188 + supplementary groups of users you want to mark as "untrusted."
48189 + These users will not be able to execute any files that are not in
48190 + root-owned directories writable only by root. If the sysctl option
48191 + is enabled, a sysctl option with name "tpe" is created.
48192 +
48193 +config GRKERNSEC_TPE_ALL
48194 + bool "Partially restrict all non-root users"
48195 + depends on GRKERNSEC_TPE
48196 + help
48197 + If you say Y here, all non-root users will be covered under
48198 + a weaker TPE restriction. This is separate from, and in addition to,
48199 + the main TPE options that you have selected elsewhere. Thus, if a
48200 + "trusted" GID is chosen, this restriction applies to even that GID.
48201 + Under this restriction, all non-root users will only be allowed to
48202 + execute files in directories they own that are not group or
48203 + world-writable, or in directories owned by root and writable only by
48204 + root. If the sysctl option is enabled, a sysctl option with name
48205 + "tpe_restrict_all" is created.
48206 +
48207 +config GRKERNSEC_TPE_INVERT
48208 + bool "Invert GID option"
48209 + depends on GRKERNSEC_TPE
48210 + help
48211 + If you say Y here, the group you specify in the TPE configuration will
48212 + decide what group TPE restrictions will be *disabled* for. This
48213 + option is useful if you want TPE restrictions to be applied to most
48214 + users on the system. If the sysctl option is enabled, a sysctl option
48215 + with name "tpe_invert" is created. Unlike other sysctl options, this
48216 + entry will default to on for backward-compatibility.
48217 +
48218 +config GRKERNSEC_TPE_GID
48219 + int "GID for untrusted users"
48220 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
48221 + default 1005
48222 + help
48223 + Setting this GID determines what group TPE restrictions will be
48224 + *enabled* for. If the sysctl option is enabled, a sysctl option
48225 + with name "tpe_gid" is created.
48226 +
48227 +config GRKERNSEC_TPE_GID
48228 + int "GID for trusted users"
48229 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
48230 + default 1005
48231 + help
48232 + Setting this GID determines what group TPE restrictions will be
48233 + *disabled* for. If the sysctl option is enabled, a sysctl option
48234 + with name "tpe_gid" is created.
48235 +
48236 +endmenu
48237 +menu "Network Protections"
48238 +depends on GRKERNSEC
48239 +
48240 +config GRKERNSEC_RANDNET
48241 + bool "Larger entropy pools"
48242 + help
48243 + If you say Y here, the entropy pools used for many features of Linux
48244 + and grsecurity will be doubled in size. Since several grsecurity
48245 + features use additional randomness, it is recommended that you say Y
48246 + here. Saying Y here has a similar effect as modifying
48247 + /proc/sys/kernel/random/poolsize.
48248 +
48249 +config GRKERNSEC_BLACKHOLE
48250 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
48251 + depends on NET
48252 + help
48253 + If you say Y here, neither TCP resets nor ICMP
48254 + destination-unreachable packets will be sent in response to packets
48255 + sent to ports for which no associated listening process exists.
48256 + This feature supports both IPV4 and IPV6 and exempts the
48257 + loopback interface from blackholing. Enabling this feature
48258 + makes a host more resilient to DoS attacks and reduces network
48259 + visibility against scanners.
48260 +
48261 + The blackhole feature as-implemented is equivalent to the FreeBSD
48262 + blackhole feature, as it prevents RST responses to all packets, not
48263 + just SYNs. Under most application behavior this causes no
48264 + problems, but applications (like haproxy) may not close certain
48265 + connections in a way that cleanly terminates them on the remote
48266 + end, leaving the remote host in LAST_ACK state. Because of this
48267 + side-effect and to prevent intentional LAST_ACK DoSes, this
48268 + feature also adds automatic mitigation against such attacks.
48269 + The mitigation drastically reduces the amount of time a socket
48270 + can spend in LAST_ACK state. If you're using haproxy and not
48271 + all servers it connects to have this option enabled, consider
48272 + disabling this feature on the haproxy host.
48273 +
48274 + If the sysctl option is enabled, two sysctl options with names
48275 + "ip_blackhole" and "lastack_retries" will be created.
48276 + While "ip_blackhole" takes the standard zero/non-zero on/off
48277 + toggle, "lastack_retries" uses the same kinds of values as
48278 + "tcp_retries1" and "tcp_retries2". The default value of 4
48279 + prevents a socket from lasting more than 45 seconds in LAST_ACK
48280 + state.
48281 +
48282 +config GRKERNSEC_SOCKET
48283 + bool "Socket restrictions"
48284 + depends on NET
48285 + help
48286 + If you say Y here, you will be able to choose from several options.
48287 + If you assign a GID on your system and add it to the supplementary
48288 + groups of users you want to restrict socket access to, this patch
48289 + will perform up to three things, based on the option(s) you choose.
48290 +
48291 +config GRKERNSEC_SOCKET_ALL
48292 + bool "Deny any sockets to group"
48293 + depends on GRKERNSEC_SOCKET
48294 + help
48295 + If you say Y here, you will be able to choose a GID whose users will
48296 + be unable to connect to other hosts from your machine or run server
48297 + applications from your machine. If the sysctl option is enabled, a
48298 + sysctl option with name "socket_all" is created.
48299 +
48300 +config GRKERNSEC_SOCKET_ALL_GID
48301 + int "GID to deny all sockets for"
48302 + depends on GRKERNSEC_SOCKET_ALL
48303 + default 1004
48304 + help
48305 + Here you can choose the GID to disable socket access for. Remember to
48306 + add the users you want socket access disabled for to the GID
48307 + specified here. If the sysctl option is enabled, a sysctl option
48308 + with name "socket_all_gid" is created.
48309 +
48310 +config GRKERNSEC_SOCKET_CLIENT
48311 + bool "Deny client sockets to group"
48312 + depends on GRKERNSEC_SOCKET
48313 + help
48314 + If you say Y here, you will be able to choose a GID whose users will
48315 + be unable to connect to other hosts from your machine, but will be
48316 + able to run servers. If this option is enabled, all users in the group
48317 + you specify will have to use passive mode when initiating ftp transfers
48318 + from the shell on your machine. If the sysctl option is enabled, a
48319 + sysctl option with name "socket_client" is created.
48320 +
48321 +config GRKERNSEC_SOCKET_CLIENT_GID
48322 + int "GID to deny client sockets for"
48323 + depends on GRKERNSEC_SOCKET_CLIENT
48324 + default 1003
48325 + help
48326 + Here you can choose the GID to disable client socket access for.
48327 + Remember to add the users you want client socket access disabled for to
48328 + the GID specified here. If the sysctl option is enabled, a sysctl
48329 + option with name "socket_client_gid" is created.
48330 +
48331 +config GRKERNSEC_SOCKET_SERVER
48332 + bool "Deny server sockets to group"
48333 + depends on GRKERNSEC_SOCKET
48334 + help
48335 + If you say Y here, you will be able to choose a GID whose users will
48336 + be unable to run server applications from your machine. If the sysctl
48337 + option is enabled, a sysctl option with name "socket_server" is created.
48338 +
48339 +config GRKERNSEC_SOCKET_SERVER_GID
48340 + int "GID to deny server sockets for"
48341 + depends on GRKERNSEC_SOCKET_SERVER
48342 + default 1002
48343 + help
48344 + Here you can choose the GID to disable server socket access for.
48345 + Remember to add the users you want server socket access disabled for to
48346 + the GID specified here. If the sysctl option is enabled, a sysctl
48347 + option with name "socket_server_gid" is created.
48348 +
48349 +endmenu
48350 +menu "Sysctl support"
48351 +depends on GRKERNSEC && SYSCTL
48352 +
48353 +config GRKERNSEC_SYSCTL
48354 + bool "Sysctl support"
48355 + help
48356 + If you say Y here, you will be able to change the options that
48357 + grsecurity runs with at bootup, without having to recompile your
48358 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
48359 + to enable (1) or disable (0) various features. All the sysctl entries
48360 + are mutable until the "grsec_lock" entry is set to a non-zero value.
48361 + All features enabled in the kernel configuration are disabled at boot
48362 + if you do not say Y to the "Turn on features by default" option.
48363 + All options should be set at startup, and the grsec_lock entry should
48364 + be set to a non-zero value after all the options are set.
48365 + *THIS IS EXTREMELY IMPORTANT*
48366 +
48367 +config GRKERNSEC_SYSCTL_DISTRO
48368 + bool "Extra sysctl support for distro makers (READ HELP)"
48369 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
48370 + help
48371 + If you say Y here, additional sysctl options will be created
48372 + for features that affect processes running as root. Therefore,
48373 + it is critical when using this option that the grsec_lock entry be
48374 + enabled after boot. Only distros with prebuilt kernel packages
48375 + with this option enabled that can ensure grsec_lock is enabled
48376 + after boot should use this option.
48377 + *Failure to set grsec_lock after boot makes all grsec features
48378 + this option covers useless*
48379 +
48380 + Currently this option creates the following sysctl entries:
48381 + "Disable Privileged I/O": "disable_priv_io"
48382 +
48383 +config GRKERNSEC_SYSCTL_ON
48384 + bool "Turn on features by default"
48385 + depends on GRKERNSEC_SYSCTL
48386 + help
48387 + If you say Y here, instead of having all features enabled in the
48388 + kernel configuration disabled at boot time, the features will be
48389 + enabled at boot time. It is recommended you say Y here unless
48390 + there is some reason you would want all sysctl-tunable features to
48391 + be disabled by default. As mentioned elsewhere, it is important
48392 + to enable the grsec_lock entry once you have finished modifying
48393 + the sysctl entries.
48394 +
48395 +endmenu
48396 +menu "Logging Options"
48397 +depends on GRKERNSEC
48398 +
48399 +config GRKERNSEC_FLOODTIME
48400 + int "Seconds in between log messages (minimum)"
48401 + default 10
48402 + help
48403 + This option allows you to enforce the number of seconds between
48404 + grsecurity log messages. The default should be suitable for most
48405 + people, however, if you choose to change it, choose a value small enough
48406 + to allow informative logs to be produced, but large enough to
48407 + prevent flooding.
48408 +
48409 +config GRKERNSEC_FLOODBURST
48410 + int "Number of messages in a burst (maximum)"
48411 + default 6
48412 + help
48413 + This option allows you to choose the maximum number of messages allowed
48414 + within the flood time interval you chose in a separate option. The
48415 + default should be suitable for most people, however if you find that
48416 + many of your logs are being interpreted as flooding, you may want to
48417 + raise this value.
48418 +
48419 +endmenu
48420 +
48421 +endmenu
48422 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
48423 new file mode 100644
48424 index 0000000..496e60d
48425 --- /dev/null
48426 +++ b/grsecurity/Makefile
48427 @@ -0,0 +1,40 @@
48428 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
48429 +# during 2001-2009 it has been completely redesigned by Brad Spengler
48430 +# into an RBAC system
48431 +#
48432 +# All code in this directory and various hooks inserted throughout the kernel
48433 +# are copyright Brad Spengler - Open Source Security, Inc., and released
48434 +# under the GPL v2 or higher
48435 +
48436 +ifndef CONFIG_IA64
48437 +KBUILD_CFLAGS += -Werror
48438 +endif
48439 +
48440 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
48441 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
48442 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
48443 +
48444 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
48445 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
48446 + gracl_learn.o grsec_log.o
48447 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
48448 +
48449 +ifdef CONFIG_NET
48450 +obj-y += grsec_sock.o
48451 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
48452 +endif
48453 +
48454 +ifndef CONFIG_GRKERNSEC
48455 +obj-y += grsec_disabled.o
48456 +endif
48457 +
48458 +ifdef CONFIG_GRKERNSEC_HIDESYM
48459 +extra-y := grsec_hidesym.o
48460 +$(obj)/grsec_hidesym.o:
48461 + @-chmod -f 500 /boot
48462 + @-chmod -f 500 /lib/modules
48463 + @-chmod -f 500 /lib64/modules
48464 + @-chmod -f 500 /lib32/modules
48465 + @-chmod -f 700 .
48466 + @echo ' grsec: protected kernel image paths'
48467 +endif
48468 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
48469 new file mode 100644
48470 index 0000000..7715893
48471 --- /dev/null
48472 +++ b/grsecurity/gracl.c
48473 @@ -0,0 +1,4164 @@
48474 +#include <linux/kernel.h>
48475 +#include <linux/module.h>
48476 +#include <linux/sched.h>
48477 +#include <linux/mm.h>
48478 +#include <linux/file.h>
48479 +#include <linux/fs.h>
48480 +#include <linux/namei.h>
48481 +#include <linux/mount.h>
48482 +#include <linux/tty.h>
48483 +#include <linux/proc_fs.h>
48484 +#include <linux/lglock.h>
48485 +#include <linux/slab.h>
48486 +#include <linux/vmalloc.h>
48487 +#include <linux/types.h>
48488 +#include <linux/sysctl.h>
48489 +#include <linux/netdevice.h>
48490 +#include <linux/ptrace.h>
48491 +#include <linux/gracl.h>
48492 +#include <linux/gralloc.h>
48493 +#include <linux/security.h>
48494 +#include <linux/grinternal.h>
48495 +#include <linux/pid_namespace.h>
48496 +#include <linux/fdtable.h>
48497 +#include <linux/percpu.h>
48498 +
48499 +#include <asm/uaccess.h>
48500 +#include <asm/errno.h>
48501 +#include <asm/mman.h>
48502 +
48503 +static struct acl_role_db acl_role_set;
48504 +static struct name_db name_set;
48505 +static struct inodev_db inodev_set;
48506 +
48507 +/* for keeping track of userspace pointers used for subjects, so we
48508 + can share references in the kernel as well
48509 +*/
48510 +
48511 +static struct path real_root;
48512 +
48513 +static struct acl_subj_map_db subj_map_set;
48514 +
48515 +static struct acl_role_label *default_role;
48516 +
48517 +static struct acl_role_label *role_list;
48518 +
48519 +static u16 acl_sp_role_value;
48520 +
48521 +extern char *gr_shared_page[4];
48522 +static DEFINE_MUTEX(gr_dev_mutex);
48523 +DEFINE_RWLOCK(gr_inode_lock);
48524 +
48525 +struct gr_arg *gr_usermode;
48526 +
48527 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
48528 +
48529 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
48530 +extern void gr_clear_learn_entries(void);
48531 +
48532 +#ifdef CONFIG_GRKERNSEC_RESLOG
48533 +extern void gr_log_resource(const struct task_struct *task,
48534 + const int res, const unsigned long wanted, const int gt);
48535 +#endif
48536 +
48537 +unsigned char *gr_system_salt;
48538 +unsigned char *gr_system_sum;
48539 +
48540 +static struct sprole_pw **acl_special_roles = NULL;
48541 +static __u16 num_sprole_pws = 0;
48542 +
48543 +static struct acl_role_label *kernel_role = NULL;
48544 +
48545 +static unsigned int gr_auth_attempts = 0;
48546 +static unsigned long gr_auth_expires = 0UL;
48547 +
48548 +#ifdef CONFIG_NET
48549 +extern struct vfsmount *sock_mnt;
48550 +#endif
48551 +
48552 +extern struct vfsmount *pipe_mnt;
48553 +extern struct vfsmount *shm_mnt;
48554 +#ifdef CONFIG_HUGETLBFS
48555 +extern struct vfsmount *hugetlbfs_vfsmount;
48556 +#endif
48557 +
48558 +static struct acl_object_label *fakefs_obj_rw;
48559 +static struct acl_object_label *fakefs_obj_rwx;
48560 +
48561 +extern int gr_init_uidset(void);
48562 +extern void gr_free_uidset(void);
48563 +extern void gr_remove_uid(uid_t uid);
48564 +extern int gr_find_uid(uid_t uid);
48565 +
48566 +DECLARE_BRLOCK(vfsmount_lock);
48567 +
48568 +__inline__ int
48569 +gr_acl_is_enabled(void)
48570 +{
48571 + return (gr_status & GR_READY);
48572 +}
48573 +
48574 +#ifdef CONFIG_BTRFS_FS
48575 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
48576 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
48577 +#endif
48578 +
48579 +static inline dev_t __get_dev(const struct dentry *dentry)
48580 +{
48581 +#ifdef CONFIG_BTRFS_FS
48582 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
48583 + return get_btrfs_dev_from_inode(dentry->d_inode);
48584 + else
48585 +#endif
48586 + return dentry->d_inode->i_sb->s_dev;
48587 +}
48588 +
48589 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48590 +{
48591 + return __get_dev(dentry);
48592 +}
48593 +
48594 +static char gr_task_roletype_to_char(struct task_struct *task)
48595 +{
48596 + switch (task->role->roletype &
48597 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
48598 + GR_ROLE_SPECIAL)) {
48599 + case GR_ROLE_DEFAULT:
48600 + return 'D';
48601 + case GR_ROLE_USER:
48602 + return 'U';
48603 + case GR_ROLE_GROUP:
48604 + return 'G';
48605 + case GR_ROLE_SPECIAL:
48606 + return 'S';
48607 + }
48608 +
48609 + return 'X';
48610 +}
48611 +
48612 +char gr_roletype_to_char(void)
48613 +{
48614 + return gr_task_roletype_to_char(current);
48615 +}
48616 +
48617 +__inline__ int
48618 +gr_acl_tpe_check(void)
48619 +{
48620 + if (unlikely(!(gr_status & GR_READY)))
48621 + return 0;
48622 + if (current->role->roletype & GR_ROLE_TPE)
48623 + return 1;
48624 + else
48625 + return 0;
48626 +}
48627 +
48628 +int
48629 +gr_handle_rawio(const struct inode *inode)
48630 +{
48631 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48632 + if (inode && S_ISBLK(inode->i_mode) &&
48633 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
48634 + !capable(CAP_SYS_RAWIO))
48635 + return 1;
48636 +#endif
48637 + return 0;
48638 +}
48639 +
48640 +static int
48641 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
48642 +{
48643 + if (likely(lena != lenb))
48644 + return 0;
48645 +
48646 + return !memcmp(a, b, lena);
48647 +}
48648 +
48649 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
48650 +{
48651 + *buflen -= namelen;
48652 + if (*buflen < 0)
48653 + return -ENAMETOOLONG;
48654 + *buffer -= namelen;
48655 + memcpy(*buffer, str, namelen);
48656 + return 0;
48657 +}
48658 +
48659 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
48660 +{
48661 + return prepend(buffer, buflen, name->name, name->len);
48662 +}
48663 +
48664 +static int prepend_path(const struct path *path, struct path *root,
48665 + char **buffer, int *buflen)
48666 +{
48667 + struct dentry *dentry = path->dentry;
48668 + struct vfsmount *vfsmnt = path->mnt;
48669 + bool slash = false;
48670 + int error = 0;
48671 +
48672 + while (dentry != root->dentry || vfsmnt != root->mnt) {
48673 + struct dentry * parent;
48674 +
48675 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
48676 + /* Global root? */
48677 + if (vfsmnt->mnt_parent == vfsmnt) {
48678 + goto out;
48679 + }
48680 + dentry = vfsmnt->mnt_mountpoint;
48681 + vfsmnt = vfsmnt->mnt_parent;
48682 + continue;
48683 + }
48684 + parent = dentry->d_parent;
48685 + prefetch(parent);
48686 + spin_lock(&dentry->d_lock);
48687 + error = prepend_name(buffer, buflen, &dentry->d_name);
48688 + spin_unlock(&dentry->d_lock);
48689 + if (!error)
48690 + error = prepend(buffer, buflen, "/", 1);
48691 + if (error)
48692 + break;
48693 +
48694 + slash = true;
48695 + dentry = parent;
48696 + }
48697 +
48698 +out:
48699 + if (!error && !slash)
48700 + error = prepend(buffer, buflen, "/", 1);
48701 +
48702 + return error;
48703 +}
48704 +
48705 +/* this must be called with vfsmount_lock and rename_lock held */
48706 +
48707 +static char *__our_d_path(const struct path *path, struct path *root,
48708 + char *buf, int buflen)
48709 +{
48710 + char *res = buf + buflen;
48711 + int error;
48712 +
48713 + prepend(&res, &buflen, "\0", 1);
48714 + error = prepend_path(path, root, &res, &buflen);
48715 + if (error)
48716 + return ERR_PTR(error);
48717 +
48718 + return res;
48719 +}
48720 +
48721 +static char *
48722 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
48723 +{
48724 + char *retval;
48725 +
48726 + retval = __our_d_path(path, root, buf, buflen);
48727 + if (unlikely(IS_ERR(retval)))
48728 + retval = strcpy(buf, "<path too long>");
48729 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
48730 + retval[1] = '\0';
48731 +
48732 + return retval;
48733 +}
48734 +
48735 +static char *
48736 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48737 + char *buf, int buflen)
48738 +{
48739 + struct path path;
48740 + char *res;
48741 +
48742 + path.dentry = (struct dentry *)dentry;
48743 + path.mnt = (struct vfsmount *)vfsmnt;
48744 +
48745 + /* we can use real_root.dentry, real_root.mnt, because this is only called
48746 + by the RBAC system */
48747 + res = gen_full_path(&path, &real_root, buf, buflen);
48748 +
48749 + return res;
48750 +}
48751 +
48752 +static char *
48753 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48754 + char *buf, int buflen)
48755 +{
48756 + char *res;
48757 + struct path path;
48758 + struct path root;
48759 + struct task_struct *reaper = &init_task;
48760 +
48761 + path.dentry = (struct dentry *)dentry;
48762 + path.mnt = (struct vfsmount *)vfsmnt;
48763 +
48764 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
48765 + get_fs_root(reaper->fs, &root);
48766 +
48767 + write_seqlock(&rename_lock);
48768 + br_read_lock(vfsmount_lock);
48769 + res = gen_full_path(&path, &root, buf, buflen);
48770 + br_read_unlock(vfsmount_lock);
48771 + write_sequnlock(&rename_lock);
48772 +
48773 + path_put(&root);
48774 + return res;
48775 +}
48776 +
48777 +static char *
48778 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48779 +{
48780 + char *ret;
48781 + write_seqlock(&rename_lock);
48782 + br_read_lock(vfsmount_lock);
48783 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48784 + PAGE_SIZE);
48785 + br_read_unlock(vfsmount_lock);
48786 + write_sequnlock(&rename_lock);
48787 + return ret;
48788 +}
48789 +
48790 +static char *
48791 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48792 +{
48793 + char *ret;
48794 + char *buf;
48795 + int buflen;
48796 +
48797 + write_seqlock(&rename_lock);
48798 + br_read_lock(vfsmount_lock);
48799 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48800 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
48801 + buflen = (int)(ret - buf);
48802 + if (buflen >= 5)
48803 + prepend(&ret, &buflen, "/proc", 5);
48804 + else
48805 + ret = strcpy(buf, "<path too long>");
48806 + br_read_unlock(vfsmount_lock);
48807 + write_sequnlock(&rename_lock);
48808 + return ret;
48809 +}
48810 +
48811 +char *
48812 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
48813 +{
48814 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48815 + PAGE_SIZE);
48816 +}
48817 +
48818 +char *
48819 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
48820 +{
48821 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48822 + PAGE_SIZE);
48823 +}
48824 +
48825 +char *
48826 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
48827 +{
48828 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
48829 + PAGE_SIZE);
48830 +}
48831 +
48832 +char *
48833 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
48834 +{
48835 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
48836 + PAGE_SIZE);
48837 +}
48838 +
48839 +char *
48840 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
48841 +{
48842 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
48843 + PAGE_SIZE);
48844 +}
48845 +
48846 +__inline__ __u32
48847 +to_gr_audit(const __u32 reqmode)
48848 +{
48849 + /* masks off auditable permission flags, then shifts them to create
48850 + auditing flags, and adds the special case of append auditing if
48851 + we're requesting write */
48852 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
48853 +}
48854 +
48855 +struct acl_subject_label *
48856 +lookup_subject_map(const struct acl_subject_label *userp)
48857 +{
48858 + unsigned int index = shash(userp, subj_map_set.s_size);
48859 + struct subject_map *match;
48860 +
48861 + match = subj_map_set.s_hash[index];
48862 +
48863 + while (match && match->user != userp)
48864 + match = match->next;
48865 +
48866 + if (match != NULL)
48867 + return match->kernel;
48868 + else
48869 + return NULL;
48870 +}
48871 +
48872 +static void
48873 +insert_subj_map_entry(struct subject_map *subjmap)
48874 +{
48875 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
48876 + struct subject_map **curr;
48877 +
48878 + subjmap->prev = NULL;
48879 +
48880 + curr = &subj_map_set.s_hash[index];
48881 + if (*curr != NULL)
48882 + (*curr)->prev = subjmap;
48883 +
48884 + subjmap->next = *curr;
48885 + *curr = subjmap;
48886 +
48887 + return;
48888 +}
48889 +
48890 +static struct acl_role_label *
48891 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
48892 + const gid_t gid)
48893 +{
48894 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
48895 + struct acl_role_label *match;
48896 + struct role_allowed_ip *ipp;
48897 + unsigned int x;
48898 + u32 curr_ip = task->signal->curr_ip;
48899 +
48900 + task->signal->saved_ip = curr_ip;
48901 +
48902 + match = acl_role_set.r_hash[index];
48903 +
48904 + while (match) {
48905 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
48906 + for (x = 0; x < match->domain_child_num; x++) {
48907 + if (match->domain_children[x] == uid)
48908 + goto found;
48909 + }
48910 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
48911 + break;
48912 + match = match->next;
48913 + }
48914 +found:
48915 + if (match == NULL) {
48916 + try_group:
48917 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
48918 + match = acl_role_set.r_hash[index];
48919 +
48920 + while (match) {
48921 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
48922 + for (x = 0; x < match->domain_child_num; x++) {
48923 + if (match->domain_children[x] == gid)
48924 + goto found2;
48925 + }
48926 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
48927 + break;
48928 + match = match->next;
48929 + }
48930 +found2:
48931 + if (match == NULL)
48932 + match = default_role;
48933 + if (match->allowed_ips == NULL)
48934 + return match;
48935 + else {
48936 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48937 + if (likely
48938 + ((ntohl(curr_ip) & ipp->netmask) ==
48939 + (ntohl(ipp->addr) & ipp->netmask)))
48940 + return match;
48941 + }
48942 + match = default_role;
48943 + }
48944 + } else if (match->allowed_ips == NULL) {
48945 + return match;
48946 + } else {
48947 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48948 + if (likely
48949 + ((ntohl(curr_ip) & ipp->netmask) ==
48950 + (ntohl(ipp->addr) & ipp->netmask)))
48951 + return match;
48952 + }
48953 + goto try_group;
48954 + }
48955 +
48956 + return match;
48957 +}
48958 +
48959 +struct acl_subject_label *
48960 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
48961 + const struct acl_role_label *role)
48962 +{
48963 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
48964 + struct acl_subject_label *match;
48965 +
48966 + match = role->subj_hash[index];
48967 +
48968 + while (match && (match->inode != ino || match->device != dev ||
48969 + (match->mode & GR_DELETED))) {
48970 + match = match->next;
48971 + }
48972 +
48973 + if (match && !(match->mode & GR_DELETED))
48974 + return match;
48975 + else
48976 + return NULL;
48977 +}
48978 +
48979 +struct acl_subject_label *
48980 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
48981 + const struct acl_role_label *role)
48982 +{
48983 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
48984 + struct acl_subject_label *match;
48985 +
48986 + match = role->subj_hash[index];
48987 +
48988 + while (match && (match->inode != ino || match->device != dev ||
48989 + !(match->mode & GR_DELETED))) {
48990 + match = match->next;
48991 + }
48992 +
48993 + if (match && (match->mode & GR_DELETED))
48994 + return match;
48995 + else
48996 + return NULL;
48997 +}
48998 +
48999 +static struct acl_object_label *
49000 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
49001 + const struct acl_subject_label *subj)
49002 +{
49003 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
49004 + struct acl_object_label *match;
49005 +
49006 + match = subj->obj_hash[index];
49007 +
49008 + while (match && (match->inode != ino || match->device != dev ||
49009 + (match->mode & GR_DELETED))) {
49010 + match = match->next;
49011 + }
49012 +
49013 + if (match && !(match->mode & GR_DELETED))
49014 + return match;
49015 + else
49016 + return NULL;
49017 +}
49018 +
49019 +static struct acl_object_label *
49020 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
49021 + const struct acl_subject_label *subj)
49022 +{
49023 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
49024 + struct acl_object_label *match;
49025 +
49026 + match = subj->obj_hash[index];
49027 +
49028 + while (match && (match->inode != ino || match->device != dev ||
49029 + !(match->mode & GR_DELETED))) {
49030 + match = match->next;
49031 + }
49032 +
49033 + if (match && (match->mode & GR_DELETED))
49034 + return match;
49035 +
49036 + match = subj->obj_hash[index];
49037 +
49038 + while (match && (match->inode != ino || match->device != dev ||
49039 + (match->mode & GR_DELETED))) {
49040 + match = match->next;
49041 + }
49042 +
49043 + if (match && !(match->mode & GR_DELETED))
49044 + return match;
49045 + else
49046 + return NULL;
49047 +}
49048 +
49049 +static struct name_entry *
49050 +lookup_name_entry(const char *name)
49051 +{
49052 + unsigned int len = strlen(name);
49053 + unsigned int key = full_name_hash(name, len);
49054 + unsigned int index = key % name_set.n_size;
49055 + struct name_entry *match;
49056 +
49057 + match = name_set.n_hash[index];
49058 +
49059 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
49060 + match = match->next;
49061 +
49062 + return match;
49063 +}
49064 +
49065 +static struct name_entry *
49066 +lookup_name_entry_create(const char *name)
49067 +{
49068 + unsigned int len = strlen(name);
49069 + unsigned int key = full_name_hash(name, len);
49070 + unsigned int index = key % name_set.n_size;
49071 + struct name_entry *match;
49072 +
49073 + match = name_set.n_hash[index];
49074 +
49075 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
49076 + !match->deleted))
49077 + match = match->next;
49078 +
49079 + if (match && match->deleted)
49080 + return match;
49081 +
49082 + match = name_set.n_hash[index];
49083 +
49084 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
49085 + match->deleted))
49086 + match = match->next;
49087 +
49088 + if (match && !match->deleted)
49089 + return match;
49090 + else
49091 + return NULL;
49092 +}
49093 +
49094 +static struct inodev_entry *
49095 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
49096 +{
49097 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
49098 + struct inodev_entry *match;
49099 +
49100 + match = inodev_set.i_hash[index];
49101 +
49102 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
49103 + match = match->next;
49104 +
49105 + return match;
49106 +}
49107 +
49108 +static void
49109 +insert_inodev_entry(struct inodev_entry *entry)
49110 +{
49111 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
49112 + inodev_set.i_size);
49113 + struct inodev_entry **curr;
49114 +
49115 + entry->prev = NULL;
49116 +
49117 + curr = &inodev_set.i_hash[index];
49118 + if (*curr != NULL)
49119 + (*curr)->prev = entry;
49120 +
49121 + entry->next = *curr;
49122 + *curr = entry;
49123 +
49124 + return;
49125 +}
49126 +
49127 +static void
49128 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
49129 +{
49130 + unsigned int index =
49131 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
49132 + struct acl_role_label **curr;
49133 + struct acl_role_label *tmp;
49134 +
49135 + curr = &acl_role_set.r_hash[index];
49136 +
49137 + /* if role was already inserted due to domains and already has
49138 + a role in the same bucket as it attached, then we need to
49139 + combine these two buckets
49140 + */
49141 + if (role->next) {
49142 + tmp = role->next;
49143 + while (tmp->next)
49144 + tmp = tmp->next;
49145 + tmp->next = *curr;
49146 + } else
49147 + role->next = *curr;
49148 + *curr = role;
49149 +
49150 + return;
49151 +}
49152 +
49153 +static void
49154 +insert_acl_role_label(struct acl_role_label *role)
49155 +{
49156 + int i;
49157 +
49158 + if (role_list == NULL) {
49159 + role_list = role;
49160 + role->prev = NULL;
49161 + } else {
49162 + role->prev = role_list;
49163 + role_list = role;
49164 + }
49165 +
49166 + /* used for hash chains */
49167 + role->next = NULL;
49168 +
49169 + if (role->roletype & GR_ROLE_DOMAIN) {
49170 + for (i = 0; i < role->domain_child_num; i++)
49171 + __insert_acl_role_label(role, role->domain_children[i]);
49172 + } else
49173 + __insert_acl_role_label(role, role->uidgid);
49174 +}
49175 +
49176 +static int
49177 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
49178 +{
49179 + struct name_entry **curr, *nentry;
49180 + struct inodev_entry *ientry;
49181 + unsigned int len = strlen(name);
49182 + unsigned int key = full_name_hash(name, len);
49183 + unsigned int index = key % name_set.n_size;
49184 +
49185 + curr = &name_set.n_hash[index];
49186 +
49187 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
49188 + curr = &((*curr)->next);
49189 +
49190 + if (*curr != NULL)
49191 + return 1;
49192 +
49193 + nentry = acl_alloc(sizeof (struct name_entry));
49194 + if (nentry == NULL)
49195 + return 0;
49196 + ientry = acl_alloc(sizeof (struct inodev_entry));
49197 + if (ientry == NULL)
49198 + return 0;
49199 + ientry->nentry = nentry;
49200 +
49201 + nentry->key = key;
49202 + nentry->name = name;
49203 + nentry->inode = inode;
49204 + nentry->device = device;
49205 + nentry->len = len;
49206 + nentry->deleted = deleted;
49207 +
49208 + nentry->prev = NULL;
49209 + curr = &name_set.n_hash[index];
49210 + if (*curr != NULL)
49211 + (*curr)->prev = nentry;
49212 + nentry->next = *curr;
49213 + *curr = nentry;
49214 +
49215 + /* insert us into the table searchable by inode/dev */
49216 + insert_inodev_entry(ientry);
49217 +
49218 + return 1;
49219 +}
49220 +
49221 +static void
49222 +insert_acl_obj_label(struct acl_object_label *obj,
49223 + struct acl_subject_label *subj)
49224 +{
49225 + unsigned int index =
49226 + fhash(obj->inode, obj->device, subj->obj_hash_size);
49227 + struct acl_object_label **curr;
49228 +
49229 +
49230 + obj->prev = NULL;
49231 +
49232 + curr = &subj->obj_hash[index];
49233 + if (*curr != NULL)
49234 + (*curr)->prev = obj;
49235 +
49236 + obj->next = *curr;
49237 + *curr = obj;
49238 +
49239 + return;
49240 +}
49241 +
49242 +static void
49243 +insert_acl_subj_label(struct acl_subject_label *obj,
49244 + struct acl_role_label *role)
49245 +{
49246 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
49247 + struct acl_subject_label **curr;
49248 +
49249 + obj->prev = NULL;
49250 +
49251 + curr = &role->subj_hash[index];
49252 + if (*curr != NULL)
49253 + (*curr)->prev = obj;
49254 +
49255 + obj->next = *curr;
49256 + *curr = obj;
49257 +
49258 + return;
49259 +}
49260 +
49261 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
49262 +
49263 +static void *
49264 +create_table(__u32 * len, int elementsize)
49265 +{
49266 + unsigned int table_sizes[] = {
49267 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
49268 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
49269 + 4194301, 8388593, 16777213, 33554393, 67108859
49270 + };
49271 + void *newtable = NULL;
49272 + unsigned int pwr = 0;
49273 +
49274 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
49275 + table_sizes[pwr] <= *len)
49276 + pwr++;
49277 +
49278 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
49279 + return newtable;
49280 +
49281 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
49282 + newtable =
49283 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
49284 + else
49285 + newtable = vmalloc(table_sizes[pwr] * elementsize);
49286 +
49287 + *len = table_sizes[pwr];
49288 +
49289 + return newtable;
49290 +}
49291 +
49292 +static int
49293 +init_variables(const struct gr_arg *arg)
49294 +{
49295 + struct task_struct *reaper = &init_task;
49296 + unsigned int stacksize;
49297 +
49298 + subj_map_set.s_size = arg->role_db.num_subjects;
49299 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
49300 + name_set.n_size = arg->role_db.num_objects;
49301 + inodev_set.i_size = arg->role_db.num_objects;
49302 +
49303 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
49304 + !name_set.n_size || !inodev_set.i_size)
49305 + return 1;
49306 +
49307 + if (!gr_init_uidset())
49308 + return 1;
49309 +
49310 + /* set up the stack that holds allocation info */
49311 +
49312 + stacksize = arg->role_db.num_pointers + 5;
49313 +
49314 + if (!acl_alloc_stack_init(stacksize))
49315 + return 1;
49316 +
49317 + /* grab reference for the real root dentry and vfsmount */
49318 + get_fs_root(reaper->fs, &real_root);
49319 +
49320 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49321 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
49322 +#endif
49323 +
49324 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
49325 + if (fakefs_obj_rw == NULL)
49326 + return 1;
49327 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
49328 +
49329 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
49330 + if (fakefs_obj_rwx == NULL)
49331 + return 1;
49332 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
49333 +
49334 + subj_map_set.s_hash =
49335 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
49336 + acl_role_set.r_hash =
49337 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
49338 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
49339 + inodev_set.i_hash =
49340 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
49341 +
49342 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
49343 + !name_set.n_hash || !inodev_set.i_hash)
49344 + return 1;
49345 +
49346 + memset(subj_map_set.s_hash, 0,
49347 + sizeof(struct subject_map *) * subj_map_set.s_size);
49348 + memset(acl_role_set.r_hash, 0,
49349 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
49350 + memset(name_set.n_hash, 0,
49351 + sizeof (struct name_entry *) * name_set.n_size);
49352 + memset(inodev_set.i_hash, 0,
49353 + sizeof (struct inodev_entry *) * inodev_set.i_size);
49354 +
49355 + return 0;
49356 +}
49357 +
49358 +/* free information not needed after startup
49359 + currently contains user->kernel pointer mappings for subjects
49360 +*/
49361 +
49362 +static void
49363 +free_init_variables(void)
49364 +{
49365 + __u32 i;
49366 +
49367 + if (subj_map_set.s_hash) {
49368 + for (i = 0; i < subj_map_set.s_size; i++) {
49369 + if (subj_map_set.s_hash[i]) {
49370 + kfree(subj_map_set.s_hash[i]);
49371 + subj_map_set.s_hash[i] = NULL;
49372 + }
49373 + }
49374 +
49375 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
49376 + PAGE_SIZE)
49377 + kfree(subj_map_set.s_hash);
49378 + else
49379 + vfree(subj_map_set.s_hash);
49380 + }
49381 +
49382 + return;
49383 +}
49384 +
49385 +static void
49386 +free_variables(void)
49387 +{
49388 + struct acl_subject_label *s;
49389 + struct acl_role_label *r;
49390 + struct task_struct *task, *task2;
49391 + unsigned int x;
49392 +
49393 + gr_clear_learn_entries();
49394 +
49395 + read_lock(&tasklist_lock);
49396 + do_each_thread(task2, task) {
49397 + task->acl_sp_role = 0;
49398 + task->acl_role_id = 0;
49399 + task->acl = NULL;
49400 + task->role = NULL;
49401 + } while_each_thread(task2, task);
49402 + read_unlock(&tasklist_lock);
49403 +
49404 + /* release the reference to the real root dentry and vfsmount */
49405 + path_put(&real_root);
49406 +
49407 + /* free all object hash tables */
49408 +
49409 + FOR_EACH_ROLE_START(r)
49410 + if (r->subj_hash == NULL)
49411 + goto next_role;
49412 + FOR_EACH_SUBJECT_START(r, s, x)
49413 + if (s->obj_hash == NULL)
49414 + break;
49415 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49416 + kfree(s->obj_hash);
49417 + else
49418 + vfree(s->obj_hash);
49419 + FOR_EACH_SUBJECT_END(s, x)
49420 + FOR_EACH_NESTED_SUBJECT_START(r, s)
49421 + if (s->obj_hash == NULL)
49422 + break;
49423 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49424 + kfree(s->obj_hash);
49425 + else
49426 + vfree(s->obj_hash);
49427 + FOR_EACH_NESTED_SUBJECT_END(s)
49428 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
49429 + kfree(r->subj_hash);
49430 + else
49431 + vfree(r->subj_hash);
49432 + r->subj_hash = NULL;
49433 +next_role:
49434 + FOR_EACH_ROLE_END(r)
49435 +
49436 + acl_free_all();
49437 +
49438 + if (acl_role_set.r_hash) {
49439 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
49440 + PAGE_SIZE)
49441 + kfree(acl_role_set.r_hash);
49442 + else
49443 + vfree(acl_role_set.r_hash);
49444 + }
49445 + if (name_set.n_hash) {
49446 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
49447 + PAGE_SIZE)
49448 + kfree(name_set.n_hash);
49449 + else
49450 + vfree(name_set.n_hash);
49451 + }
49452 +
49453 + if (inodev_set.i_hash) {
49454 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
49455 + PAGE_SIZE)
49456 + kfree(inodev_set.i_hash);
49457 + else
49458 + vfree(inodev_set.i_hash);
49459 + }
49460 +
49461 + gr_free_uidset();
49462 +
49463 + memset(&name_set, 0, sizeof (struct name_db));
49464 + memset(&inodev_set, 0, sizeof (struct inodev_db));
49465 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
49466 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
49467 +
49468 + default_role = NULL;
49469 + role_list = NULL;
49470 +
49471 + return;
49472 +}
49473 +
49474 +static __u32
49475 +count_user_objs(struct acl_object_label *userp)
49476 +{
49477 + struct acl_object_label o_tmp;
49478 + __u32 num = 0;
49479 +
49480 + while (userp) {
49481 + if (copy_from_user(&o_tmp, userp,
49482 + sizeof (struct acl_object_label)))
49483 + break;
49484 +
49485 + userp = o_tmp.prev;
49486 + num++;
49487 + }
49488 +
49489 + return num;
49490 +}
49491 +
49492 +static struct acl_subject_label *
49493 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
49494 +
49495 +static int
49496 +copy_user_glob(struct acl_object_label *obj)
49497 +{
49498 + struct acl_object_label *g_tmp, **guser;
49499 + unsigned int len;
49500 + char *tmp;
49501 +
49502 + if (obj->globbed == NULL)
49503 + return 0;
49504 +
49505 + guser = &obj->globbed;
49506 + while (*guser) {
49507 + g_tmp = (struct acl_object_label *)
49508 + acl_alloc(sizeof (struct acl_object_label));
49509 + if (g_tmp == NULL)
49510 + return -ENOMEM;
49511 +
49512 + if (copy_from_user(g_tmp, *guser,
49513 + sizeof (struct acl_object_label)))
49514 + return -EFAULT;
49515 +
49516 + len = strnlen_user(g_tmp->filename, PATH_MAX);
49517 +
49518 + if (!len || len >= PATH_MAX)
49519 + return -EINVAL;
49520 +
49521 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49522 + return -ENOMEM;
49523 +
49524 + if (copy_from_user(tmp, g_tmp->filename, len))
49525 + return -EFAULT;
49526 + tmp[len-1] = '\0';
49527 + g_tmp->filename = tmp;
49528 +
49529 + *guser = g_tmp;
49530 + guser = &(g_tmp->next);
49531 + }
49532 +
49533 + return 0;
49534 +}
49535 +
49536 +static int
49537 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
49538 + struct acl_role_label *role)
49539 +{
49540 + struct acl_object_label *o_tmp;
49541 + unsigned int len;
49542 + int ret;
49543 + char *tmp;
49544 +
49545 + while (userp) {
49546 + if ((o_tmp = (struct acl_object_label *)
49547 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
49548 + return -ENOMEM;
49549 +
49550 + if (copy_from_user(o_tmp, userp,
49551 + sizeof (struct acl_object_label)))
49552 + return -EFAULT;
49553 +
49554 + userp = o_tmp->prev;
49555 +
49556 + len = strnlen_user(o_tmp->filename, PATH_MAX);
49557 +
49558 + if (!len || len >= PATH_MAX)
49559 + return -EINVAL;
49560 +
49561 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49562 + return -ENOMEM;
49563 +
49564 + if (copy_from_user(tmp, o_tmp->filename, len))
49565 + return -EFAULT;
49566 + tmp[len-1] = '\0';
49567 + o_tmp->filename = tmp;
49568 +
49569 + insert_acl_obj_label(o_tmp, subj);
49570 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
49571 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
49572 + return -ENOMEM;
49573 +
49574 + ret = copy_user_glob(o_tmp);
49575 + if (ret)
49576 + return ret;
49577 +
49578 + if (o_tmp->nested) {
49579 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
49580 + if (IS_ERR(o_tmp->nested))
49581 + return PTR_ERR(o_tmp->nested);
49582 +
49583 + /* insert into nested subject list */
49584 + o_tmp->nested->next = role->hash->first;
49585 + role->hash->first = o_tmp->nested;
49586 + }
49587 + }
49588 +
49589 + return 0;
49590 +}
49591 +
49592 +static __u32
49593 +count_user_subjs(struct acl_subject_label *userp)
49594 +{
49595 + struct acl_subject_label s_tmp;
49596 + __u32 num = 0;
49597 +
49598 + while (userp) {
49599 + if (copy_from_user(&s_tmp, userp,
49600 + sizeof (struct acl_subject_label)))
49601 + break;
49602 +
49603 + userp = s_tmp.prev;
49604 + /* do not count nested subjects against this count, since
49605 + they are not included in the hash table, but are
49606 + attached to objects. We have already counted
49607 + the subjects in userspace for the allocation
49608 + stack
49609 + */
49610 + if (!(s_tmp.mode & GR_NESTED))
49611 + num++;
49612 + }
49613 +
49614 + return num;
49615 +}
49616 +
49617 +static int
49618 +copy_user_allowedips(struct acl_role_label *rolep)
49619 +{
49620 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
49621 +
49622 + ruserip = rolep->allowed_ips;
49623 +
49624 + while (ruserip) {
49625 + rlast = rtmp;
49626 +
49627 + if ((rtmp = (struct role_allowed_ip *)
49628 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
49629 + return -ENOMEM;
49630 +
49631 + if (copy_from_user(rtmp, ruserip,
49632 + sizeof (struct role_allowed_ip)))
49633 + return -EFAULT;
49634 +
49635 + ruserip = rtmp->prev;
49636 +
49637 + if (!rlast) {
49638 + rtmp->prev = NULL;
49639 + rolep->allowed_ips = rtmp;
49640 + } else {
49641 + rlast->next = rtmp;
49642 + rtmp->prev = rlast;
49643 + }
49644 +
49645 + if (!ruserip)
49646 + rtmp->next = NULL;
49647 + }
49648 +
49649 + return 0;
49650 +}
49651 +
49652 +static int
49653 +copy_user_transitions(struct acl_role_label *rolep)
49654 +{
49655 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
49656 +
49657 + unsigned int len;
49658 + char *tmp;
49659 +
49660 + rusertp = rolep->transitions;
49661 +
49662 + while (rusertp) {
49663 + rlast = rtmp;
49664 +
49665 + if ((rtmp = (struct role_transition *)
49666 + acl_alloc(sizeof (struct role_transition))) == NULL)
49667 + return -ENOMEM;
49668 +
49669 + if (copy_from_user(rtmp, rusertp,
49670 + sizeof (struct role_transition)))
49671 + return -EFAULT;
49672 +
49673 + rusertp = rtmp->prev;
49674 +
49675 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
49676 +
49677 + if (!len || len >= GR_SPROLE_LEN)
49678 + return -EINVAL;
49679 +
49680 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49681 + return -ENOMEM;
49682 +
49683 + if (copy_from_user(tmp, rtmp->rolename, len))
49684 + return -EFAULT;
49685 + tmp[len-1] = '\0';
49686 + rtmp->rolename = tmp;
49687 +
49688 + if (!rlast) {
49689 + rtmp->prev = NULL;
49690 + rolep->transitions = rtmp;
49691 + } else {
49692 + rlast->next = rtmp;
49693 + rtmp->prev = rlast;
49694 + }
49695 +
49696 + if (!rusertp)
49697 + rtmp->next = NULL;
49698 + }
49699 +
49700 + return 0;
49701 +}
49702 +
49703 +static struct acl_subject_label *
49704 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
49705 +{
49706 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
49707 + unsigned int len;
49708 + char *tmp;
49709 + __u32 num_objs;
49710 + struct acl_ip_label **i_tmp, *i_utmp2;
49711 + struct gr_hash_struct ghash;
49712 + struct subject_map *subjmap;
49713 + unsigned int i_num;
49714 + int err;
49715 +
49716 + s_tmp = lookup_subject_map(userp);
49717 +
49718 + /* we've already copied this subject into the kernel, just return
49719 + the reference to it, and don't copy it over again
49720 + */
49721 + if (s_tmp)
49722 + return(s_tmp);
49723 +
49724 + if ((s_tmp = (struct acl_subject_label *)
49725 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
49726 + return ERR_PTR(-ENOMEM);
49727 +
49728 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
49729 + if (subjmap == NULL)
49730 + return ERR_PTR(-ENOMEM);
49731 +
49732 + subjmap->user = userp;
49733 + subjmap->kernel = s_tmp;
49734 + insert_subj_map_entry(subjmap);
49735 +
49736 + if (copy_from_user(s_tmp, userp,
49737 + sizeof (struct acl_subject_label)))
49738 + return ERR_PTR(-EFAULT);
49739 +
49740 + len = strnlen_user(s_tmp->filename, PATH_MAX);
49741 +
49742 + if (!len || len >= PATH_MAX)
49743 + return ERR_PTR(-EINVAL);
49744 +
49745 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49746 + return ERR_PTR(-ENOMEM);
49747 +
49748 + if (copy_from_user(tmp, s_tmp->filename, len))
49749 + return ERR_PTR(-EFAULT);
49750 + tmp[len-1] = '\0';
49751 + s_tmp->filename = tmp;
49752 +
49753 + if (!strcmp(s_tmp->filename, "/"))
49754 + role->root_label = s_tmp;
49755 +
49756 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
49757 + return ERR_PTR(-EFAULT);
49758 +
49759 + /* copy user and group transition tables */
49760 +
49761 + if (s_tmp->user_trans_num) {
49762 + uid_t *uidlist;
49763 +
49764 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
49765 + if (uidlist == NULL)
49766 + return ERR_PTR(-ENOMEM);
49767 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
49768 + return ERR_PTR(-EFAULT);
49769 +
49770 + s_tmp->user_transitions = uidlist;
49771 + }
49772 +
49773 + if (s_tmp->group_trans_num) {
49774 + gid_t *gidlist;
49775 +
49776 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
49777 + if (gidlist == NULL)
49778 + return ERR_PTR(-ENOMEM);
49779 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
49780 + return ERR_PTR(-EFAULT);
49781 +
49782 + s_tmp->group_transitions = gidlist;
49783 + }
49784 +
49785 + /* set up object hash table */
49786 + num_objs = count_user_objs(ghash.first);
49787 +
49788 + s_tmp->obj_hash_size = num_objs;
49789 + s_tmp->obj_hash =
49790 + (struct acl_object_label **)
49791 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
49792 +
49793 + if (!s_tmp->obj_hash)
49794 + return ERR_PTR(-ENOMEM);
49795 +
49796 + memset(s_tmp->obj_hash, 0,
49797 + s_tmp->obj_hash_size *
49798 + sizeof (struct acl_object_label *));
49799 +
49800 + /* add in objects */
49801 + err = copy_user_objs(ghash.first, s_tmp, role);
49802 +
49803 + if (err)
49804 + return ERR_PTR(err);
49805 +
49806 + /* set pointer for parent subject */
49807 + if (s_tmp->parent_subject) {
49808 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
49809 +
49810 + if (IS_ERR(s_tmp2))
49811 + return s_tmp2;
49812 +
49813 + s_tmp->parent_subject = s_tmp2;
49814 + }
49815 +
49816 + /* add in ip acls */
49817 +
49818 + if (!s_tmp->ip_num) {
49819 + s_tmp->ips = NULL;
49820 + goto insert;
49821 + }
49822 +
49823 + i_tmp =
49824 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
49825 + sizeof (struct acl_ip_label *));
49826 +
49827 + if (!i_tmp)
49828 + return ERR_PTR(-ENOMEM);
49829 +
49830 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
49831 + *(i_tmp + i_num) =
49832 + (struct acl_ip_label *)
49833 + acl_alloc(sizeof (struct acl_ip_label));
49834 + if (!*(i_tmp + i_num))
49835 + return ERR_PTR(-ENOMEM);
49836 +
49837 + if (copy_from_user
49838 + (&i_utmp2, s_tmp->ips + i_num,
49839 + sizeof (struct acl_ip_label *)))
49840 + return ERR_PTR(-EFAULT);
49841 +
49842 + if (copy_from_user
49843 + (*(i_tmp + i_num), i_utmp2,
49844 + sizeof (struct acl_ip_label)))
49845 + return ERR_PTR(-EFAULT);
49846 +
49847 + if ((*(i_tmp + i_num))->iface == NULL)
49848 + continue;
49849 +
49850 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
49851 + if (!len || len >= IFNAMSIZ)
49852 + return ERR_PTR(-EINVAL);
49853 + tmp = acl_alloc(len);
49854 + if (tmp == NULL)
49855 + return ERR_PTR(-ENOMEM);
49856 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
49857 + return ERR_PTR(-EFAULT);
49858 + (*(i_tmp + i_num))->iface = tmp;
49859 + }
49860 +
49861 + s_tmp->ips = i_tmp;
49862 +
49863 +insert:
49864 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
49865 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
49866 + return ERR_PTR(-ENOMEM);
49867 +
49868 + return s_tmp;
49869 +}
49870 +
49871 +static int
49872 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
49873 +{
49874 + struct acl_subject_label s_pre;
49875 + struct acl_subject_label * ret;
49876 + int err;
49877 +
49878 + while (userp) {
49879 + if (copy_from_user(&s_pre, userp,
49880 + sizeof (struct acl_subject_label)))
49881 + return -EFAULT;
49882 +
49883 + /* do not add nested subjects here, add
49884 + while parsing objects
49885 + */
49886 +
49887 + if (s_pre.mode & GR_NESTED) {
49888 + userp = s_pre.prev;
49889 + continue;
49890 + }
49891 +
49892 + ret = do_copy_user_subj(userp, role);
49893 +
49894 + err = PTR_ERR(ret);
49895 + if (IS_ERR(ret))
49896 + return err;
49897 +
49898 + insert_acl_subj_label(ret, role);
49899 +
49900 + userp = s_pre.prev;
49901 + }
49902 +
49903 + return 0;
49904 +}
49905 +
49906 +static int
49907 +copy_user_acl(struct gr_arg *arg)
49908 +{
49909 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
49910 + struct sprole_pw *sptmp;
49911 + struct gr_hash_struct *ghash;
49912 + uid_t *domainlist;
49913 + unsigned int r_num;
49914 + unsigned int len;
49915 + char *tmp;
49916 + int err = 0;
49917 + __u16 i;
49918 + __u32 num_subjs;
49919 +
49920 + /* we need a default and kernel role */
49921 + if (arg->role_db.num_roles < 2)
49922 + return -EINVAL;
49923 +
49924 + /* copy special role authentication info from userspace */
49925 +
49926 + num_sprole_pws = arg->num_sprole_pws;
49927 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
49928 +
49929 + if (!acl_special_roles) {
49930 + err = -ENOMEM;
49931 + goto cleanup;
49932 + }
49933 +
49934 + for (i = 0; i < num_sprole_pws; i++) {
49935 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
49936 + if (!sptmp) {
49937 + err = -ENOMEM;
49938 + goto cleanup;
49939 + }
49940 + if (copy_from_user(sptmp, arg->sprole_pws + i,
49941 + sizeof (struct sprole_pw))) {
49942 + err = -EFAULT;
49943 + goto cleanup;
49944 + }
49945 +
49946 + len =
49947 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
49948 +
49949 + if (!len || len >= GR_SPROLE_LEN) {
49950 + err = -EINVAL;
49951 + goto cleanup;
49952 + }
49953 +
49954 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
49955 + err = -ENOMEM;
49956 + goto cleanup;
49957 + }
49958 +
49959 + if (copy_from_user(tmp, sptmp->rolename, len)) {
49960 + err = -EFAULT;
49961 + goto cleanup;
49962 + }
49963 + tmp[len-1] = '\0';
49964 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49965 + printk(KERN_ALERT "Copying special role %s\n", tmp);
49966 +#endif
49967 + sptmp->rolename = tmp;
49968 + acl_special_roles[i] = sptmp;
49969 + }
49970 +
49971 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
49972 +
49973 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
49974 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
49975 +
49976 + if (!r_tmp) {
49977 + err = -ENOMEM;
49978 + goto cleanup;
49979 + }
49980 +
49981 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
49982 + sizeof (struct acl_role_label *))) {
49983 + err = -EFAULT;
49984 + goto cleanup;
49985 + }
49986 +
49987 + if (copy_from_user(r_tmp, r_utmp2,
49988 + sizeof (struct acl_role_label))) {
49989 + err = -EFAULT;
49990 + goto cleanup;
49991 + }
49992 +
49993 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
49994 +
49995 + if (!len || len >= PATH_MAX) {
49996 + err = -EINVAL;
49997 + goto cleanup;
49998 + }
49999 +
50000 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
50001 + err = -ENOMEM;
50002 + goto cleanup;
50003 + }
50004 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
50005 + err = -EFAULT;
50006 + goto cleanup;
50007 + }
50008 + tmp[len-1] = '\0';
50009 + r_tmp->rolename = tmp;
50010 +
50011 + if (!strcmp(r_tmp->rolename, "default")
50012 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
50013 + default_role = r_tmp;
50014 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
50015 + kernel_role = r_tmp;
50016 + }
50017 +
50018 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
50019 + err = -ENOMEM;
50020 + goto cleanup;
50021 + }
50022 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
50023 + err = -EFAULT;
50024 + goto cleanup;
50025 + }
50026 +
50027 + r_tmp->hash = ghash;
50028 +
50029 + num_subjs = count_user_subjs(r_tmp->hash->first);
50030 +
50031 + r_tmp->subj_hash_size = num_subjs;
50032 + r_tmp->subj_hash =
50033 + (struct acl_subject_label **)
50034 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
50035 +
50036 + if (!r_tmp->subj_hash) {
50037 + err = -ENOMEM;
50038 + goto cleanup;
50039 + }
50040 +
50041 + err = copy_user_allowedips(r_tmp);
50042 + if (err)
50043 + goto cleanup;
50044 +
50045 + /* copy domain info */
50046 + if (r_tmp->domain_children != NULL) {
50047 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
50048 + if (domainlist == NULL) {
50049 + err = -ENOMEM;
50050 + goto cleanup;
50051 + }
50052 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
50053 + err = -EFAULT;
50054 + goto cleanup;
50055 + }
50056 + r_tmp->domain_children = domainlist;
50057 + }
50058 +
50059 + err = copy_user_transitions(r_tmp);
50060 + if (err)
50061 + goto cleanup;
50062 +
50063 + memset(r_tmp->subj_hash, 0,
50064 + r_tmp->subj_hash_size *
50065 + sizeof (struct acl_subject_label *));
50066 +
50067 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
50068 +
50069 + if (err)
50070 + goto cleanup;
50071 +
50072 + /* set nested subject list to null */
50073 + r_tmp->hash->first = NULL;
50074 +
50075 + insert_acl_role_label(r_tmp);
50076 + }
50077 +
50078 + goto return_err;
50079 + cleanup:
50080 + free_variables();
50081 + return_err:
50082 + return err;
50083 +
50084 +}
50085 +
50086 +static int
50087 +gracl_init(struct gr_arg *args)
50088 +{
50089 + int error = 0;
50090 +
50091 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
50092 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
50093 +
50094 + if (init_variables(args)) {
50095 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
50096 + error = -ENOMEM;
50097 + free_variables();
50098 + goto out;
50099 + }
50100 +
50101 + error = copy_user_acl(args);
50102 + free_init_variables();
50103 + if (error) {
50104 + free_variables();
50105 + goto out;
50106 + }
50107 +
50108 + if ((error = gr_set_acls(0))) {
50109 + free_variables();
50110 + goto out;
50111 + }
50112 +
50113 + pax_open_kernel();
50114 + gr_status |= GR_READY;
50115 + pax_close_kernel();
50116 +
50117 + out:
50118 + return error;
50119 +}
50120 +
50121 +/* derived from glibc fnmatch() 0: match, 1: no match*/
50122 +
50123 +static int
50124 +glob_match(const char *p, const char *n)
50125 +{
50126 + char c;
50127 +
50128 + while ((c = *p++) != '\0') {
50129 + switch (c) {
50130 + case '?':
50131 + if (*n == '\0')
50132 + return 1;
50133 + else if (*n == '/')
50134 + return 1;
50135 + break;
50136 + case '\\':
50137 + if (*n != c)
50138 + return 1;
50139 + break;
50140 + case '*':
50141 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
50142 + if (*n == '/')
50143 + return 1;
50144 + else if (c == '?') {
50145 + if (*n == '\0')
50146 + return 1;
50147 + else
50148 + ++n;
50149 + }
50150 + }
50151 + if (c == '\0') {
50152 + return 0;
50153 + } else {
50154 + const char *endp;
50155 +
50156 + if ((endp = strchr(n, '/')) == NULL)
50157 + endp = n + strlen(n);
50158 +
50159 + if (c == '[') {
50160 + for (--p; n < endp; ++n)
50161 + if (!glob_match(p, n))
50162 + return 0;
50163 + } else if (c == '/') {
50164 + while (*n != '\0' && *n != '/')
50165 + ++n;
50166 + if (*n == '/' && !glob_match(p, n + 1))
50167 + return 0;
50168 + } else {
50169 + for (--p; n < endp; ++n)
50170 + if (*n == c && !glob_match(p, n))
50171 + return 0;
50172 + }
50173 +
50174 + return 1;
50175 + }
50176 + case '[':
50177 + {
50178 + int not;
50179 + char cold;
50180 +
50181 + if (*n == '\0' || *n == '/')
50182 + return 1;
50183 +
50184 + not = (*p == '!' || *p == '^');
50185 + if (not)
50186 + ++p;
50187 +
50188 + c = *p++;
50189 + for (;;) {
50190 + unsigned char fn = (unsigned char)*n;
50191 +
50192 + if (c == '\0')
50193 + return 1;
50194 + else {
50195 + if (c == fn)
50196 + goto matched;
50197 + cold = c;
50198 + c = *p++;
50199 +
50200 + if (c == '-' && *p != ']') {
50201 + unsigned char cend = *p++;
50202 +
50203 + if (cend == '\0')
50204 + return 1;
50205 +
50206 + if (cold <= fn && fn <= cend)
50207 + goto matched;
50208 +
50209 + c = *p++;
50210 + }
50211 + }
50212 +
50213 + if (c == ']')
50214 + break;
50215 + }
50216 + if (!not)
50217 + return 1;
50218 + break;
50219 + matched:
50220 + while (c != ']') {
50221 + if (c == '\0')
50222 + return 1;
50223 +
50224 + c = *p++;
50225 + }
50226 + if (not)
50227 + return 1;
50228 + }
50229 + break;
50230 + default:
50231 + if (c != *n)
50232 + return 1;
50233 + }
50234 +
50235 + ++n;
50236 + }
50237 +
50238 + if (*n == '\0')
50239 + return 0;
50240 +
50241 + if (*n == '/')
50242 + return 0;
50243 +
50244 + return 1;
50245 +}
50246 +
50247 +static struct acl_object_label *
50248 +chk_glob_label(struct acl_object_label *globbed,
50249 + struct dentry *dentry, struct vfsmount *mnt, char **path)
50250 +{
50251 + struct acl_object_label *tmp;
50252 +
50253 + if (*path == NULL)
50254 + *path = gr_to_filename_nolock(dentry, mnt);
50255 +
50256 + tmp = globbed;
50257 +
50258 + while (tmp) {
50259 + if (!glob_match(tmp->filename, *path))
50260 + return tmp;
50261 + tmp = tmp->next;
50262 + }
50263 +
50264 + return NULL;
50265 +}
50266 +
50267 +static struct acl_object_label *
50268 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50269 + const ino_t curr_ino, const dev_t curr_dev,
50270 + const struct acl_subject_label *subj, char **path, const int checkglob)
50271 +{
50272 + struct acl_subject_label *tmpsubj;
50273 + struct acl_object_label *retval;
50274 + struct acl_object_label *retval2;
50275 +
50276 + tmpsubj = (struct acl_subject_label *) subj;
50277 + read_lock(&gr_inode_lock);
50278 + do {
50279 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
50280 + if (retval) {
50281 + if (checkglob && retval->globbed) {
50282 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
50283 + (struct vfsmount *)orig_mnt, path);
50284 + if (retval2)
50285 + retval = retval2;
50286 + }
50287 + break;
50288 + }
50289 + } while ((tmpsubj = tmpsubj->parent_subject));
50290 + read_unlock(&gr_inode_lock);
50291 +
50292 + return retval;
50293 +}
50294 +
50295 +static __inline__ struct acl_object_label *
50296 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50297 + struct dentry *curr_dentry,
50298 + const struct acl_subject_label *subj, char **path, const int checkglob)
50299 +{
50300 + int newglob = checkglob;
50301 + ino_t inode;
50302 + dev_t device;
50303 +
50304 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
50305 + as we don't want a / * rule to match instead of the / object
50306 + don't do this for create lookups that call this function though, since they're looking up
50307 + on the parent and thus need globbing checks on all paths
50308 + */
50309 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
50310 + newglob = GR_NO_GLOB;
50311 +
50312 + spin_lock(&curr_dentry->d_lock);
50313 + inode = curr_dentry->d_inode->i_ino;
50314 + device = __get_dev(curr_dentry);
50315 + spin_unlock(&curr_dentry->d_lock);
50316 +
50317 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
50318 +}
50319 +
50320 +static struct acl_object_label *
50321 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50322 + const struct acl_subject_label *subj, char *path, const int checkglob)
50323 +{
50324 + struct dentry *dentry = (struct dentry *) l_dentry;
50325 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50326 + struct acl_object_label *retval;
50327 + struct dentry *parent;
50328 +
50329 + write_seqlock(&rename_lock);
50330 + br_read_lock(vfsmount_lock);
50331 +
50332 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
50333 +#ifdef CONFIG_NET
50334 + mnt == sock_mnt ||
50335 +#endif
50336 +#ifdef CONFIG_HUGETLBFS
50337 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
50338 +#endif
50339 + /* ignore Eric Biederman */
50340 + IS_PRIVATE(l_dentry->d_inode))) {
50341 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
50342 + goto out;
50343 + }
50344 +
50345 + for (;;) {
50346 + if (dentry == real_root.dentry && mnt == real_root.mnt)
50347 + break;
50348 +
50349 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50350 + if (mnt->mnt_parent == mnt)
50351 + break;
50352 +
50353 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50354 + if (retval != NULL)
50355 + goto out;
50356 +
50357 + dentry = mnt->mnt_mountpoint;
50358 + mnt = mnt->mnt_parent;
50359 + continue;
50360 + }
50361 +
50362 + parent = dentry->d_parent;
50363 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50364 + if (retval != NULL)
50365 + goto out;
50366 +
50367 + dentry = parent;
50368 + }
50369 +
50370 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50371 +
50372 + /* real_root is pinned so we don't have to hold a reference */
50373 + if (retval == NULL)
50374 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
50375 +out:
50376 + br_read_unlock(vfsmount_lock);
50377 + write_sequnlock(&rename_lock);
50378 +
50379 + BUG_ON(retval == NULL);
50380 +
50381 + return retval;
50382 +}
50383 +
50384 +static __inline__ struct acl_object_label *
50385 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50386 + const struct acl_subject_label *subj)
50387 +{
50388 + char *path = NULL;
50389 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
50390 +}
50391 +
50392 +static __inline__ struct acl_object_label *
50393 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50394 + const struct acl_subject_label *subj)
50395 +{
50396 + char *path = NULL;
50397 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
50398 +}
50399 +
50400 +static __inline__ struct acl_object_label *
50401 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50402 + const struct acl_subject_label *subj, char *path)
50403 +{
50404 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
50405 +}
50406 +
50407 +static struct acl_subject_label *
50408 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50409 + const struct acl_role_label *role)
50410 +{
50411 + struct dentry *dentry = (struct dentry *) l_dentry;
50412 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50413 + struct acl_subject_label *retval;
50414 + struct dentry *parent;
50415 +
50416 + write_seqlock(&rename_lock);
50417 + br_read_lock(vfsmount_lock);
50418 +
50419 + for (;;) {
50420 + if (dentry == real_root.dentry && mnt == real_root.mnt)
50421 + break;
50422 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50423 + if (mnt->mnt_parent == mnt)
50424 + break;
50425 +
50426 + spin_lock(&dentry->d_lock);
50427 + read_lock(&gr_inode_lock);
50428 + retval =
50429 + lookup_acl_subj_label(dentry->d_inode->i_ino,
50430 + __get_dev(dentry), role);
50431 + read_unlock(&gr_inode_lock);
50432 + spin_unlock(&dentry->d_lock);
50433 + if (retval != NULL)
50434 + goto out;
50435 +
50436 + dentry = mnt->mnt_mountpoint;
50437 + mnt = mnt->mnt_parent;
50438 + continue;
50439 + }
50440 +
50441 + spin_lock(&dentry->d_lock);
50442 + read_lock(&gr_inode_lock);
50443 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50444 + __get_dev(dentry), role);
50445 + read_unlock(&gr_inode_lock);
50446 + parent = dentry->d_parent;
50447 + spin_unlock(&dentry->d_lock);
50448 +
50449 + if (retval != NULL)
50450 + goto out;
50451 +
50452 + dentry = parent;
50453 + }
50454 +
50455 + spin_lock(&dentry->d_lock);
50456 + read_lock(&gr_inode_lock);
50457 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50458 + __get_dev(dentry), role);
50459 + read_unlock(&gr_inode_lock);
50460 + spin_unlock(&dentry->d_lock);
50461 +
50462 + if (unlikely(retval == NULL)) {
50463 + /* real_root is pinned, we don't need to hold a reference */
50464 + read_lock(&gr_inode_lock);
50465 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
50466 + __get_dev(real_root.dentry), role);
50467 + read_unlock(&gr_inode_lock);
50468 + }
50469 +out:
50470 + br_read_unlock(vfsmount_lock);
50471 + write_sequnlock(&rename_lock);
50472 +
50473 + BUG_ON(retval == NULL);
50474 +
50475 + return retval;
50476 +}
50477 +
50478 +static void
50479 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
50480 +{
50481 + struct task_struct *task = current;
50482 + const struct cred *cred = current_cred();
50483 +
50484 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50485 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50486 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50487 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
50488 +
50489 + return;
50490 +}
50491 +
50492 +static void
50493 +gr_log_learn_sysctl(const char *path, const __u32 mode)
50494 +{
50495 + struct task_struct *task = current;
50496 + const struct cred *cred = current_cred();
50497 +
50498 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50499 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50500 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50501 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
50502 +
50503 + return;
50504 +}
50505 +
50506 +static void
50507 +gr_log_learn_id_change(const char type, const unsigned int real,
50508 + const unsigned int effective, const unsigned int fs)
50509 +{
50510 + struct task_struct *task = current;
50511 + const struct cred *cred = current_cred();
50512 +
50513 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
50514 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50515 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50516 + type, real, effective, fs, &task->signal->saved_ip);
50517 +
50518 + return;
50519 +}
50520 +
50521 +__u32
50522 +gr_search_file(const struct dentry * dentry, const __u32 mode,
50523 + const struct vfsmount * mnt)
50524 +{
50525 + __u32 retval = mode;
50526 + struct acl_subject_label *curracl;
50527 + struct acl_object_label *currobj;
50528 +
50529 + if (unlikely(!(gr_status & GR_READY)))
50530 + return (mode & ~GR_AUDITS);
50531 +
50532 + curracl = current->acl;
50533 +
50534 + currobj = chk_obj_label(dentry, mnt, curracl);
50535 + retval = currobj->mode & mode;
50536 +
50537 + /* if we're opening a specified transfer file for writing
50538 + (e.g. /dev/initctl), then transfer our role to init
50539 + */
50540 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
50541 + current->role->roletype & GR_ROLE_PERSIST)) {
50542 + struct task_struct *task = init_pid_ns.child_reaper;
50543 +
50544 + if (task->role != current->role) {
50545 + task->acl_sp_role = 0;
50546 + task->acl_role_id = current->acl_role_id;
50547 + task->role = current->role;
50548 + rcu_read_lock();
50549 + read_lock(&grsec_exec_file_lock);
50550 + gr_apply_subject_to_task(task);
50551 + read_unlock(&grsec_exec_file_lock);
50552 + rcu_read_unlock();
50553 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
50554 + }
50555 + }
50556 +
50557 + if (unlikely
50558 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
50559 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
50560 + __u32 new_mode = mode;
50561 +
50562 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50563 +
50564 + retval = new_mode;
50565 +
50566 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
50567 + new_mode |= GR_INHERIT;
50568 +
50569 + if (!(mode & GR_NOLEARN))
50570 + gr_log_learn(dentry, mnt, new_mode);
50571 + }
50572 +
50573 + return retval;
50574 +}
50575 +
50576 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
50577 + const struct dentry *parent,
50578 + const struct vfsmount *mnt)
50579 +{
50580 + struct name_entry *match;
50581 + struct acl_object_label *matchpo;
50582 + struct acl_subject_label *curracl;
50583 + char *path;
50584 +
50585 + if (unlikely(!(gr_status & GR_READY)))
50586 + return NULL;
50587 +
50588 + preempt_disable();
50589 + path = gr_to_filename_rbac(new_dentry, mnt);
50590 + match = lookup_name_entry_create(path);
50591 +
50592 + curracl = current->acl;
50593 +
50594 + if (match) {
50595 + read_lock(&gr_inode_lock);
50596 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
50597 + read_unlock(&gr_inode_lock);
50598 +
50599 + if (matchpo) {
50600 + preempt_enable();
50601 + return matchpo;
50602 + }
50603 + }
50604 +
50605 + // lookup parent
50606 +
50607 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
50608 +
50609 + preempt_enable();
50610 + return matchpo;
50611 +}
50612 +
50613 +__u32
50614 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
50615 + const struct vfsmount * mnt, const __u32 mode)
50616 +{
50617 + struct acl_object_label *matchpo;
50618 + __u32 retval;
50619 +
50620 + if (unlikely(!(gr_status & GR_READY)))
50621 + return (mode & ~GR_AUDITS);
50622 +
50623 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
50624 +
50625 + retval = matchpo->mode & mode;
50626 +
50627 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
50628 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50629 + __u32 new_mode = mode;
50630 +
50631 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50632 +
50633 + gr_log_learn(new_dentry, mnt, new_mode);
50634 + return new_mode;
50635 + }
50636 +
50637 + return retval;
50638 +}
50639 +
50640 +__u32
50641 +gr_check_link(const struct dentry * new_dentry,
50642 + const struct dentry * parent_dentry,
50643 + const struct vfsmount * parent_mnt,
50644 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
50645 +{
50646 + struct acl_object_label *obj;
50647 + __u32 oldmode, newmode;
50648 + __u32 needmode;
50649 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
50650 + GR_DELETE | GR_INHERIT;
50651 +
50652 + if (unlikely(!(gr_status & GR_READY)))
50653 + return (GR_CREATE | GR_LINK);
50654 +
50655 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
50656 + oldmode = obj->mode;
50657 +
50658 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
50659 + newmode = obj->mode;
50660 +
50661 + needmode = newmode & checkmodes;
50662 +
50663 + // old name for hardlink must have at least the permissions of the new name
50664 + if ((oldmode & needmode) != needmode)
50665 + goto bad;
50666 +
50667 + // if old name had restrictions/auditing, make sure the new name does as well
50668 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
50669 +
50670 + // don't allow hardlinking of suid/sgid files without permission
50671 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50672 + needmode |= GR_SETID;
50673 +
50674 + if ((newmode & needmode) != needmode)
50675 + goto bad;
50676 +
50677 + // enforce minimum permissions
50678 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
50679 + return newmode;
50680 +bad:
50681 + needmode = oldmode;
50682 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50683 + needmode |= GR_SETID;
50684 +
50685 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
50686 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
50687 + return (GR_CREATE | GR_LINK);
50688 + } else if (newmode & GR_SUPPRESS)
50689 + return GR_SUPPRESS;
50690 + else
50691 + return 0;
50692 +}
50693 +
50694 +int
50695 +gr_check_hidden_task(const struct task_struct *task)
50696 +{
50697 + if (unlikely(!(gr_status & GR_READY)))
50698 + return 0;
50699 +
50700 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
50701 + return 1;
50702 +
50703 + return 0;
50704 +}
50705 +
50706 +int
50707 +gr_check_protected_task(const struct task_struct *task)
50708 +{
50709 + if (unlikely(!(gr_status & GR_READY) || !task))
50710 + return 0;
50711 +
50712 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50713 + task->acl != current->acl)
50714 + return 1;
50715 +
50716 + return 0;
50717 +}
50718 +
50719 +int
50720 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50721 +{
50722 + struct task_struct *p;
50723 + int ret = 0;
50724 +
50725 + if (unlikely(!(gr_status & GR_READY) || !pid))
50726 + return ret;
50727 +
50728 + read_lock(&tasklist_lock);
50729 + do_each_pid_task(pid, type, p) {
50730 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50731 + p->acl != current->acl) {
50732 + ret = 1;
50733 + goto out;
50734 + }
50735 + } while_each_pid_task(pid, type, p);
50736 +out:
50737 + read_unlock(&tasklist_lock);
50738 +
50739 + return ret;
50740 +}
50741 +
50742 +void
50743 +gr_copy_label(struct task_struct *tsk)
50744 +{
50745 + /* plain copying of fields is already done by dup_task_struct */
50746 + tsk->signal->used_accept = 0;
50747 + tsk->acl_sp_role = 0;
50748 + //tsk->acl_role_id = current->acl_role_id;
50749 + //tsk->acl = current->acl;
50750 + //tsk->role = current->role;
50751 + tsk->signal->curr_ip = current->signal->curr_ip;
50752 + tsk->signal->saved_ip = current->signal->saved_ip;
50753 + if (current->exec_file)
50754 + get_file(current->exec_file);
50755 + //tsk->exec_file = current->exec_file;
50756 + //tsk->is_writable = current->is_writable;
50757 + if (unlikely(current->signal->used_accept)) {
50758 + current->signal->curr_ip = 0;
50759 + current->signal->saved_ip = 0;
50760 + }
50761 +
50762 + return;
50763 +}
50764 +
50765 +static void
50766 +gr_set_proc_res(struct task_struct *task)
50767 +{
50768 + struct acl_subject_label *proc;
50769 + unsigned short i;
50770 +
50771 + proc = task->acl;
50772 +
50773 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
50774 + return;
50775 +
50776 + for (i = 0; i < RLIM_NLIMITS; i++) {
50777 + if (!(proc->resmask & (1 << i)))
50778 + continue;
50779 +
50780 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
50781 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
50782 + }
50783 +
50784 + return;
50785 +}
50786 +
50787 +extern int __gr_process_user_ban(struct user_struct *user);
50788 +
50789 +int
50790 +gr_check_user_change(int real, int effective, int fs)
50791 +{
50792 + unsigned int i;
50793 + __u16 num;
50794 + uid_t *uidlist;
50795 + int curuid;
50796 + int realok = 0;
50797 + int effectiveok = 0;
50798 + int fsok = 0;
50799 +
50800 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
50801 + struct user_struct *user;
50802 +
50803 + if (real == -1)
50804 + goto skipit;
50805 +
50806 + user = find_user(real);
50807 + if (user == NULL)
50808 + goto skipit;
50809 +
50810 + if (__gr_process_user_ban(user)) {
50811 + /* for find_user */
50812 + free_uid(user);
50813 + return 1;
50814 + }
50815 +
50816 + /* for find_user */
50817 + free_uid(user);
50818 +
50819 +skipit:
50820 +#endif
50821 +
50822 + if (unlikely(!(gr_status & GR_READY)))
50823 + return 0;
50824 +
50825 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50826 + gr_log_learn_id_change('u', real, effective, fs);
50827 +
50828 + num = current->acl->user_trans_num;
50829 + uidlist = current->acl->user_transitions;
50830 +
50831 + if (uidlist == NULL)
50832 + return 0;
50833 +
50834 + if (real == -1)
50835 + realok = 1;
50836 + if (effective == -1)
50837 + effectiveok = 1;
50838 + if (fs == -1)
50839 + fsok = 1;
50840 +
50841 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
50842 + for (i = 0; i < num; i++) {
50843 + curuid = (int)uidlist[i];
50844 + if (real == curuid)
50845 + realok = 1;
50846 + if (effective == curuid)
50847 + effectiveok = 1;
50848 + if (fs == curuid)
50849 + fsok = 1;
50850 + }
50851 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
50852 + for (i = 0; i < num; i++) {
50853 + curuid = (int)uidlist[i];
50854 + if (real == curuid)
50855 + break;
50856 + if (effective == curuid)
50857 + break;
50858 + if (fs == curuid)
50859 + break;
50860 + }
50861 + /* not in deny list */
50862 + if (i == num) {
50863 + realok = 1;
50864 + effectiveok = 1;
50865 + fsok = 1;
50866 + }
50867 + }
50868 +
50869 + if (realok && effectiveok && fsok)
50870 + return 0;
50871 + else {
50872 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50873 + return 1;
50874 + }
50875 +}
50876 +
50877 +int
50878 +gr_check_group_change(int real, int effective, int fs)
50879 +{
50880 + unsigned int i;
50881 + __u16 num;
50882 + gid_t *gidlist;
50883 + int curgid;
50884 + int realok = 0;
50885 + int effectiveok = 0;
50886 + int fsok = 0;
50887 +
50888 + if (unlikely(!(gr_status & GR_READY)))
50889 + return 0;
50890 +
50891 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50892 + gr_log_learn_id_change('g', real, effective, fs);
50893 +
50894 + num = current->acl->group_trans_num;
50895 + gidlist = current->acl->group_transitions;
50896 +
50897 + if (gidlist == NULL)
50898 + return 0;
50899 +
50900 + if (real == -1)
50901 + realok = 1;
50902 + if (effective == -1)
50903 + effectiveok = 1;
50904 + if (fs == -1)
50905 + fsok = 1;
50906 +
50907 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
50908 + for (i = 0; i < num; i++) {
50909 + curgid = (int)gidlist[i];
50910 + if (real == curgid)
50911 + realok = 1;
50912 + if (effective == curgid)
50913 + effectiveok = 1;
50914 + if (fs == curgid)
50915 + fsok = 1;
50916 + }
50917 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
50918 + for (i = 0; i < num; i++) {
50919 + curgid = (int)gidlist[i];
50920 + if (real == curgid)
50921 + break;
50922 + if (effective == curgid)
50923 + break;
50924 + if (fs == curgid)
50925 + break;
50926 + }
50927 + /* not in deny list */
50928 + if (i == num) {
50929 + realok = 1;
50930 + effectiveok = 1;
50931 + fsok = 1;
50932 + }
50933 + }
50934 +
50935 + if (realok && effectiveok && fsok)
50936 + return 0;
50937 + else {
50938 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50939 + return 1;
50940 + }
50941 +}
50942 +
50943 +extern int gr_acl_is_capable(const int cap);
50944 +
50945 +void
50946 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
50947 +{
50948 + struct acl_role_label *role = task->role;
50949 + struct acl_subject_label *subj = NULL;
50950 + struct acl_object_label *obj;
50951 + struct file *filp;
50952 +
50953 + if (unlikely(!(gr_status & GR_READY)))
50954 + return;
50955 +
50956 + filp = task->exec_file;
50957 +
50958 + /* kernel process, we'll give them the kernel role */
50959 + if (unlikely(!filp)) {
50960 + task->role = kernel_role;
50961 + task->acl = kernel_role->root_label;
50962 + return;
50963 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
50964 + role = lookup_acl_role_label(task, uid, gid);
50965 +
50966 + /* don't change the role if we're not a privileged process */
50967 + if (role && task->role != role &&
50968 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
50969 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
50970 + return;
50971 +
50972 + /* perform subject lookup in possibly new role
50973 + we can use this result below in the case where role == task->role
50974 + */
50975 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
50976 +
50977 + /* if we changed uid/gid, but result in the same role
50978 + and are using inheritance, don't lose the inherited subject
50979 + if current subject is other than what normal lookup
50980 + would result in, we arrived via inheritance, don't
50981 + lose subject
50982 + */
50983 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
50984 + (subj == task->acl)))
50985 + task->acl = subj;
50986 +
50987 + task->role = role;
50988 +
50989 + task->is_writable = 0;
50990 +
50991 + /* ignore additional mmap checks for processes that are writable
50992 + by the default ACL */
50993 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50994 + if (unlikely(obj->mode & GR_WRITE))
50995 + task->is_writable = 1;
50996 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50997 + if (unlikely(obj->mode & GR_WRITE))
50998 + task->is_writable = 1;
50999 +
51000 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51001 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51002 +#endif
51003 +
51004 + gr_set_proc_res(task);
51005 +
51006 + return;
51007 +}
51008 +
51009 +int
51010 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
51011 + const int unsafe_flags)
51012 +{
51013 + struct task_struct *task = current;
51014 + struct acl_subject_label *newacl;
51015 + struct acl_object_label *obj;
51016 + __u32 retmode;
51017 +
51018 + if (unlikely(!(gr_status & GR_READY)))
51019 + return 0;
51020 +
51021 + newacl = chk_subj_label(dentry, mnt, task->role);
51022 +
51023 + task_lock(task);
51024 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
51025 + !(task->role->roletype & GR_ROLE_GOD) &&
51026 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
51027 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
51028 + task_unlock(task);
51029 + if (unsafe_flags & LSM_UNSAFE_SHARE)
51030 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
51031 + else
51032 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
51033 + return -EACCES;
51034 + }
51035 + task_unlock(task);
51036 +
51037 + obj = chk_obj_label(dentry, mnt, task->acl);
51038 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
51039 +
51040 + if (!(task->acl->mode & GR_INHERITLEARN) &&
51041 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
51042 + if (obj->nested)
51043 + task->acl = obj->nested;
51044 + else
51045 + task->acl = newacl;
51046 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
51047 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
51048 +
51049 + task->is_writable = 0;
51050 +
51051 + /* ignore additional mmap checks for processes that are writable
51052 + by the default ACL */
51053 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
51054 + if (unlikely(obj->mode & GR_WRITE))
51055 + task->is_writable = 1;
51056 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
51057 + if (unlikely(obj->mode & GR_WRITE))
51058 + task->is_writable = 1;
51059 +
51060 + gr_set_proc_res(task);
51061 +
51062 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51063 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51064 +#endif
51065 + return 0;
51066 +}
51067 +
51068 +/* always called with valid inodev ptr */
51069 +static void
51070 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
51071 +{
51072 + struct acl_object_label *matchpo;
51073 + struct acl_subject_label *matchps;
51074 + struct acl_subject_label *subj;
51075 + struct acl_role_label *role;
51076 + unsigned int x;
51077 +
51078 + FOR_EACH_ROLE_START(role)
51079 + FOR_EACH_SUBJECT_START(role, subj, x)
51080 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
51081 + matchpo->mode |= GR_DELETED;
51082 + FOR_EACH_SUBJECT_END(subj,x)
51083 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
51084 + if (subj->inode == ino && subj->device == dev)
51085 + subj->mode |= GR_DELETED;
51086 + FOR_EACH_NESTED_SUBJECT_END(subj)
51087 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
51088 + matchps->mode |= GR_DELETED;
51089 + FOR_EACH_ROLE_END(role)
51090 +
51091 + inodev->nentry->deleted = 1;
51092 +
51093 + return;
51094 +}
51095 +
51096 +void
51097 +gr_handle_delete(const ino_t ino, const dev_t dev)
51098 +{
51099 + struct inodev_entry *inodev;
51100 +
51101 + if (unlikely(!(gr_status & GR_READY)))
51102 + return;
51103 +
51104 + write_lock(&gr_inode_lock);
51105 + inodev = lookup_inodev_entry(ino, dev);
51106 + if (inodev != NULL)
51107 + do_handle_delete(inodev, ino, dev);
51108 + write_unlock(&gr_inode_lock);
51109 +
51110 + return;
51111 +}
51112 +
51113 +static void
51114 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
51115 + const ino_t newinode, const dev_t newdevice,
51116 + struct acl_subject_label *subj)
51117 +{
51118 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
51119 + struct acl_object_label *match;
51120 +
51121 + match = subj->obj_hash[index];
51122 +
51123 + while (match && (match->inode != oldinode ||
51124 + match->device != olddevice ||
51125 + !(match->mode & GR_DELETED)))
51126 + match = match->next;
51127 +
51128 + if (match && (match->inode == oldinode)
51129 + && (match->device == olddevice)
51130 + && (match->mode & GR_DELETED)) {
51131 + if (match->prev == NULL) {
51132 + subj->obj_hash[index] = match->next;
51133 + if (match->next != NULL)
51134 + match->next->prev = NULL;
51135 + } else {
51136 + match->prev->next = match->next;
51137 + if (match->next != NULL)
51138 + match->next->prev = match->prev;
51139 + }
51140 + match->prev = NULL;
51141 + match->next = NULL;
51142 + match->inode = newinode;
51143 + match->device = newdevice;
51144 + match->mode &= ~GR_DELETED;
51145 +
51146 + insert_acl_obj_label(match, subj);
51147 + }
51148 +
51149 + return;
51150 +}
51151 +
51152 +static void
51153 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
51154 + const ino_t newinode, const dev_t newdevice,
51155 + struct acl_role_label *role)
51156 +{
51157 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
51158 + struct acl_subject_label *match;
51159 +
51160 + match = role->subj_hash[index];
51161 +
51162 + while (match && (match->inode != oldinode ||
51163 + match->device != olddevice ||
51164 + !(match->mode & GR_DELETED)))
51165 + match = match->next;
51166 +
51167 + if (match && (match->inode == oldinode)
51168 + && (match->device == olddevice)
51169 + && (match->mode & GR_DELETED)) {
51170 + if (match->prev == NULL) {
51171 + role->subj_hash[index] = match->next;
51172 + if (match->next != NULL)
51173 + match->next->prev = NULL;
51174 + } else {
51175 + match->prev->next = match->next;
51176 + if (match->next != NULL)
51177 + match->next->prev = match->prev;
51178 + }
51179 + match->prev = NULL;
51180 + match->next = NULL;
51181 + match->inode = newinode;
51182 + match->device = newdevice;
51183 + match->mode &= ~GR_DELETED;
51184 +
51185 + insert_acl_subj_label(match, role);
51186 + }
51187 +
51188 + return;
51189 +}
51190 +
51191 +static void
51192 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
51193 + const ino_t newinode, const dev_t newdevice)
51194 +{
51195 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
51196 + struct inodev_entry *match;
51197 +
51198 + match = inodev_set.i_hash[index];
51199 +
51200 + while (match && (match->nentry->inode != oldinode ||
51201 + match->nentry->device != olddevice || !match->nentry->deleted))
51202 + match = match->next;
51203 +
51204 + if (match && (match->nentry->inode == oldinode)
51205 + && (match->nentry->device == olddevice) &&
51206 + match->nentry->deleted) {
51207 + if (match->prev == NULL) {
51208 + inodev_set.i_hash[index] = match->next;
51209 + if (match->next != NULL)
51210 + match->next->prev = NULL;
51211 + } else {
51212 + match->prev->next = match->next;
51213 + if (match->next != NULL)
51214 + match->next->prev = match->prev;
51215 + }
51216 + match->prev = NULL;
51217 + match->next = NULL;
51218 + match->nentry->inode = newinode;
51219 + match->nentry->device = newdevice;
51220 + match->nentry->deleted = 0;
51221 +
51222 + insert_inodev_entry(match);
51223 + }
51224 +
51225 + return;
51226 +}
51227 +
51228 +static void
51229 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
51230 +{
51231 + struct acl_subject_label *subj;
51232 + struct acl_role_label *role;
51233 + unsigned int x;
51234 +
51235 + FOR_EACH_ROLE_START(role)
51236 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
51237 +
51238 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
51239 + if ((subj->inode == ino) && (subj->device == dev)) {
51240 + subj->inode = ino;
51241 + subj->device = dev;
51242 + }
51243 + FOR_EACH_NESTED_SUBJECT_END(subj)
51244 + FOR_EACH_SUBJECT_START(role, subj, x)
51245 + update_acl_obj_label(matchn->inode, matchn->device,
51246 + ino, dev, subj);
51247 + FOR_EACH_SUBJECT_END(subj,x)
51248 + FOR_EACH_ROLE_END(role)
51249 +
51250 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
51251 +
51252 + return;
51253 +}
51254 +
51255 +static void
51256 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
51257 + const struct vfsmount *mnt)
51258 +{
51259 + ino_t ino = dentry->d_inode->i_ino;
51260 + dev_t dev = __get_dev(dentry);
51261 +
51262 + __do_handle_create(matchn, ino, dev);
51263 +
51264 + return;
51265 +}
51266 +
51267 +void
51268 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
51269 +{
51270 + struct name_entry *matchn;
51271 +
51272 + if (unlikely(!(gr_status & GR_READY)))
51273 + return;
51274 +
51275 + preempt_disable();
51276 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
51277 +
51278 + if (unlikely((unsigned long)matchn)) {
51279 + write_lock(&gr_inode_lock);
51280 + do_handle_create(matchn, dentry, mnt);
51281 + write_unlock(&gr_inode_lock);
51282 + }
51283 + preempt_enable();
51284 +
51285 + return;
51286 +}
51287 +
51288 +void
51289 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
51290 +{
51291 + struct name_entry *matchn;
51292 +
51293 + if (unlikely(!(gr_status & GR_READY)))
51294 + return;
51295 +
51296 + preempt_disable();
51297 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
51298 +
51299 + if (unlikely((unsigned long)matchn)) {
51300 + write_lock(&gr_inode_lock);
51301 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
51302 + write_unlock(&gr_inode_lock);
51303 + }
51304 + preempt_enable();
51305 +
51306 + return;
51307 +}
51308 +
51309 +void
51310 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
51311 + struct dentry *old_dentry,
51312 + struct dentry *new_dentry,
51313 + struct vfsmount *mnt, const __u8 replace)
51314 +{
51315 + struct name_entry *matchn;
51316 + struct inodev_entry *inodev;
51317 + struct inode *inode = new_dentry->d_inode;
51318 + ino_t old_ino = old_dentry->d_inode->i_ino;
51319 + dev_t old_dev = __get_dev(old_dentry);
51320 +
51321 + /* vfs_rename swaps the name and parent link for old_dentry and
51322 + new_dentry
51323 + at this point, old_dentry has the new name, parent link, and inode
51324 + for the renamed file
51325 + if a file is being replaced by a rename, new_dentry has the inode
51326 + and name for the replaced file
51327 + */
51328 +
51329 + if (unlikely(!(gr_status & GR_READY)))
51330 + return;
51331 +
51332 + preempt_disable();
51333 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
51334 +
51335 + /* we wouldn't have to check d_inode if it weren't for
51336 + NFS silly-renaming
51337 + */
51338 +
51339 + write_lock(&gr_inode_lock);
51340 + if (unlikely(replace && inode)) {
51341 + ino_t new_ino = inode->i_ino;
51342 + dev_t new_dev = __get_dev(new_dentry);
51343 +
51344 + inodev = lookup_inodev_entry(new_ino, new_dev);
51345 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
51346 + do_handle_delete(inodev, new_ino, new_dev);
51347 + }
51348 +
51349 + inodev = lookup_inodev_entry(old_ino, old_dev);
51350 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
51351 + do_handle_delete(inodev, old_ino, old_dev);
51352 +
51353 + if (unlikely((unsigned long)matchn))
51354 + do_handle_create(matchn, old_dentry, mnt);
51355 +
51356 + write_unlock(&gr_inode_lock);
51357 + preempt_enable();
51358 +
51359 + return;
51360 +}
51361 +
51362 +static int
51363 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
51364 + unsigned char **sum)
51365 +{
51366 + struct acl_role_label *r;
51367 + struct role_allowed_ip *ipp;
51368 + struct role_transition *trans;
51369 + unsigned int i;
51370 + int found = 0;
51371 + u32 curr_ip = current->signal->curr_ip;
51372 +
51373 + current->signal->saved_ip = curr_ip;
51374 +
51375 + /* check transition table */
51376 +
51377 + for (trans = current->role->transitions; trans; trans = trans->next) {
51378 + if (!strcmp(rolename, trans->rolename)) {
51379 + found = 1;
51380 + break;
51381 + }
51382 + }
51383 +
51384 + if (!found)
51385 + return 0;
51386 +
51387 + /* handle special roles that do not require authentication
51388 + and check ip */
51389 +
51390 + FOR_EACH_ROLE_START(r)
51391 + if (!strcmp(rolename, r->rolename) &&
51392 + (r->roletype & GR_ROLE_SPECIAL)) {
51393 + found = 0;
51394 + if (r->allowed_ips != NULL) {
51395 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
51396 + if ((ntohl(curr_ip) & ipp->netmask) ==
51397 + (ntohl(ipp->addr) & ipp->netmask))
51398 + found = 1;
51399 + }
51400 + } else
51401 + found = 2;
51402 + if (!found)
51403 + return 0;
51404 +
51405 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
51406 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
51407 + *salt = NULL;
51408 + *sum = NULL;
51409 + return 1;
51410 + }
51411 + }
51412 + FOR_EACH_ROLE_END(r)
51413 +
51414 + for (i = 0; i < num_sprole_pws; i++) {
51415 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
51416 + *salt = acl_special_roles[i]->salt;
51417 + *sum = acl_special_roles[i]->sum;
51418 + return 1;
51419 + }
51420 + }
51421 +
51422 + return 0;
51423 +}
51424 +
51425 +static void
51426 +assign_special_role(char *rolename)
51427 +{
51428 + struct acl_object_label *obj;
51429 + struct acl_role_label *r;
51430 + struct acl_role_label *assigned = NULL;
51431 + struct task_struct *tsk;
51432 + struct file *filp;
51433 +
51434 + FOR_EACH_ROLE_START(r)
51435 + if (!strcmp(rolename, r->rolename) &&
51436 + (r->roletype & GR_ROLE_SPECIAL)) {
51437 + assigned = r;
51438 + break;
51439 + }
51440 + FOR_EACH_ROLE_END(r)
51441 +
51442 + if (!assigned)
51443 + return;
51444 +
51445 + read_lock(&tasklist_lock);
51446 + read_lock(&grsec_exec_file_lock);
51447 +
51448 + tsk = current->real_parent;
51449 + if (tsk == NULL)
51450 + goto out_unlock;
51451 +
51452 + filp = tsk->exec_file;
51453 + if (filp == NULL)
51454 + goto out_unlock;
51455 +
51456 + tsk->is_writable = 0;
51457 +
51458 + tsk->acl_sp_role = 1;
51459 + tsk->acl_role_id = ++acl_sp_role_value;
51460 + tsk->role = assigned;
51461 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
51462 +
51463 + /* ignore additional mmap checks for processes that are writable
51464 + by the default ACL */
51465 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51466 + if (unlikely(obj->mode & GR_WRITE))
51467 + tsk->is_writable = 1;
51468 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
51469 + if (unlikely(obj->mode & GR_WRITE))
51470 + tsk->is_writable = 1;
51471 +
51472 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51473 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
51474 +#endif
51475 +
51476 +out_unlock:
51477 + read_unlock(&grsec_exec_file_lock);
51478 + read_unlock(&tasklist_lock);
51479 + return;
51480 +}
51481 +
51482 +int gr_check_secure_terminal(struct task_struct *task)
51483 +{
51484 + struct task_struct *p, *p2, *p3;
51485 + struct files_struct *files;
51486 + struct fdtable *fdt;
51487 + struct file *our_file = NULL, *file;
51488 + int i;
51489 +
51490 + if (task->signal->tty == NULL)
51491 + return 1;
51492 +
51493 + files = get_files_struct(task);
51494 + if (files != NULL) {
51495 + rcu_read_lock();
51496 + fdt = files_fdtable(files);
51497 + for (i=0; i < fdt->max_fds; i++) {
51498 + file = fcheck_files(files, i);
51499 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
51500 + get_file(file);
51501 + our_file = file;
51502 + }
51503 + }
51504 + rcu_read_unlock();
51505 + put_files_struct(files);
51506 + }
51507 +
51508 + if (our_file == NULL)
51509 + return 1;
51510 +
51511 + read_lock(&tasklist_lock);
51512 + do_each_thread(p2, p) {
51513 + files = get_files_struct(p);
51514 + if (files == NULL ||
51515 + (p->signal && p->signal->tty == task->signal->tty)) {
51516 + if (files != NULL)
51517 + put_files_struct(files);
51518 + continue;
51519 + }
51520 + rcu_read_lock();
51521 + fdt = files_fdtable(files);
51522 + for (i=0; i < fdt->max_fds; i++) {
51523 + file = fcheck_files(files, i);
51524 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
51525 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
51526 + p3 = task;
51527 + while (p3->pid > 0) {
51528 + if (p3 == p)
51529 + break;
51530 + p3 = p3->real_parent;
51531 + }
51532 + if (p3 == p)
51533 + break;
51534 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
51535 + gr_handle_alertkill(p);
51536 + rcu_read_unlock();
51537 + put_files_struct(files);
51538 + read_unlock(&tasklist_lock);
51539 + fput(our_file);
51540 + return 0;
51541 + }
51542 + }
51543 + rcu_read_unlock();
51544 + put_files_struct(files);
51545 + } while_each_thread(p2, p);
51546 + read_unlock(&tasklist_lock);
51547 +
51548 + fput(our_file);
51549 + return 1;
51550 +}
51551 +
51552 +ssize_t
51553 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
51554 +{
51555 + struct gr_arg_wrapper uwrap;
51556 + unsigned char *sprole_salt = NULL;
51557 + unsigned char *sprole_sum = NULL;
51558 + int error = sizeof (struct gr_arg_wrapper);
51559 + int error2 = 0;
51560 +
51561 + mutex_lock(&gr_dev_mutex);
51562 +
51563 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
51564 + error = -EPERM;
51565 + goto out;
51566 + }
51567 +
51568 + if (count != sizeof (struct gr_arg_wrapper)) {
51569 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
51570 + error = -EINVAL;
51571 + goto out;
51572 + }
51573 +
51574 +
51575 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
51576 + gr_auth_expires = 0;
51577 + gr_auth_attempts = 0;
51578 + }
51579 +
51580 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
51581 + error = -EFAULT;
51582 + goto out;
51583 + }
51584 +
51585 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
51586 + error = -EINVAL;
51587 + goto out;
51588 + }
51589 +
51590 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
51591 + error = -EFAULT;
51592 + goto out;
51593 + }
51594 +
51595 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51596 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51597 + time_after(gr_auth_expires, get_seconds())) {
51598 + error = -EBUSY;
51599 + goto out;
51600 + }
51601 +
51602 + /* if non-root trying to do anything other than use a special role,
51603 + do not attempt authentication, do not count towards authentication
51604 + locking
51605 + */
51606 +
51607 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
51608 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51609 + current_uid()) {
51610 + error = -EPERM;
51611 + goto out;
51612 + }
51613 +
51614 + /* ensure pw and special role name are null terminated */
51615 +
51616 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
51617 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
51618 +
51619 + /* Okay.
51620 + * We have our enough of the argument structure..(we have yet
51621 + * to copy_from_user the tables themselves) . Copy the tables
51622 + * only if we need them, i.e. for loading operations. */
51623 +
51624 + switch (gr_usermode->mode) {
51625 + case GR_STATUS:
51626 + if (gr_status & GR_READY) {
51627 + error = 1;
51628 + if (!gr_check_secure_terminal(current))
51629 + error = 3;
51630 + } else
51631 + error = 2;
51632 + goto out;
51633 + case GR_SHUTDOWN:
51634 + if ((gr_status & GR_READY)
51635 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51636 + pax_open_kernel();
51637 + gr_status &= ~GR_READY;
51638 + pax_close_kernel();
51639 +
51640 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
51641 + free_variables();
51642 + memset(gr_usermode, 0, sizeof (struct gr_arg));
51643 + memset(gr_system_salt, 0, GR_SALT_LEN);
51644 + memset(gr_system_sum, 0, GR_SHA_LEN);
51645 + } else if (gr_status & GR_READY) {
51646 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
51647 + error = -EPERM;
51648 + } else {
51649 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
51650 + error = -EAGAIN;
51651 + }
51652 + break;
51653 + case GR_ENABLE:
51654 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
51655 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
51656 + else {
51657 + if (gr_status & GR_READY)
51658 + error = -EAGAIN;
51659 + else
51660 + error = error2;
51661 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
51662 + }
51663 + break;
51664 + case GR_RELOAD:
51665 + if (!(gr_status & GR_READY)) {
51666 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
51667 + error = -EAGAIN;
51668 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51669 + preempt_disable();
51670 +
51671 + pax_open_kernel();
51672 + gr_status &= ~GR_READY;
51673 + pax_close_kernel();
51674 +
51675 + free_variables();
51676 + if (!(error2 = gracl_init(gr_usermode))) {
51677 + preempt_enable();
51678 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
51679 + } else {
51680 + preempt_enable();
51681 + error = error2;
51682 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51683 + }
51684 + } else {
51685 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51686 + error = -EPERM;
51687 + }
51688 + break;
51689 + case GR_SEGVMOD:
51690 + if (unlikely(!(gr_status & GR_READY))) {
51691 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
51692 + error = -EAGAIN;
51693 + break;
51694 + }
51695 +
51696 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51697 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
51698 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
51699 + struct acl_subject_label *segvacl;
51700 + segvacl =
51701 + lookup_acl_subj_label(gr_usermode->segv_inode,
51702 + gr_usermode->segv_device,
51703 + current->role);
51704 + if (segvacl) {
51705 + segvacl->crashes = 0;
51706 + segvacl->expires = 0;
51707 + }
51708 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
51709 + gr_remove_uid(gr_usermode->segv_uid);
51710 + }
51711 + } else {
51712 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
51713 + error = -EPERM;
51714 + }
51715 + break;
51716 + case GR_SPROLE:
51717 + case GR_SPROLEPAM:
51718 + if (unlikely(!(gr_status & GR_READY))) {
51719 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
51720 + error = -EAGAIN;
51721 + break;
51722 + }
51723 +
51724 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
51725 + current->role->expires = 0;
51726 + current->role->auth_attempts = 0;
51727 + }
51728 +
51729 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51730 + time_after(current->role->expires, get_seconds())) {
51731 + error = -EBUSY;
51732 + goto out;
51733 + }
51734 +
51735 + if (lookup_special_role_auth
51736 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
51737 + && ((!sprole_salt && !sprole_sum)
51738 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
51739 + char *p = "";
51740 + assign_special_role(gr_usermode->sp_role);
51741 + read_lock(&tasklist_lock);
51742 + if (current->real_parent)
51743 + p = current->real_parent->role->rolename;
51744 + read_unlock(&tasklist_lock);
51745 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
51746 + p, acl_sp_role_value);
51747 + } else {
51748 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
51749 + error = -EPERM;
51750 + if(!(current->role->auth_attempts++))
51751 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51752 +
51753 + goto out;
51754 + }
51755 + break;
51756 + case GR_UNSPROLE:
51757 + if (unlikely(!(gr_status & GR_READY))) {
51758 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
51759 + error = -EAGAIN;
51760 + break;
51761 + }
51762 +
51763 + if (current->role->roletype & GR_ROLE_SPECIAL) {
51764 + char *p = "";
51765 + int i = 0;
51766 +
51767 + read_lock(&tasklist_lock);
51768 + if (current->real_parent) {
51769 + p = current->real_parent->role->rolename;
51770 + i = current->real_parent->acl_role_id;
51771 + }
51772 + read_unlock(&tasklist_lock);
51773 +
51774 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
51775 + gr_set_acls(1);
51776 + } else {
51777 + error = -EPERM;
51778 + goto out;
51779 + }
51780 + break;
51781 + default:
51782 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
51783 + error = -EINVAL;
51784 + break;
51785 + }
51786 +
51787 + if (error != -EPERM)
51788 + goto out;
51789 +
51790 + if(!(gr_auth_attempts++))
51791 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51792 +
51793 + out:
51794 + mutex_unlock(&gr_dev_mutex);
51795 + return error;
51796 +}
51797 +
51798 +/* must be called with
51799 + rcu_read_lock();
51800 + read_lock(&tasklist_lock);
51801 + read_lock(&grsec_exec_file_lock);
51802 +*/
51803 +int gr_apply_subject_to_task(struct task_struct *task)
51804 +{
51805 + struct acl_object_label *obj;
51806 + char *tmpname;
51807 + struct acl_subject_label *tmpsubj;
51808 + struct file *filp;
51809 + struct name_entry *nmatch;
51810 +
51811 + filp = task->exec_file;
51812 + if (filp == NULL)
51813 + return 0;
51814 +
51815 + /* the following is to apply the correct subject
51816 + on binaries running when the RBAC system
51817 + is enabled, when the binaries have been
51818 + replaced or deleted since their execution
51819 + -----
51820 + when the RBAC system starts, the inode/dev
51821 + from exec_file will be one the RBAC system
51822 + is unaware of. It only knows the inode/dev
51823 + of the present file on disk, or the absence
51824 + of it.
51825 + */
51826 + preempt_disable();
51827 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
51828 +
51829 + nmatch = lookup_name_entry(tmpname);
51830 + preempt_enable();
51831 + tmpsubj = NULL;
51832 + if (nmatch) {
51833 + if (nmatch->deleted)
51834 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
51835 + else
51836 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
51837 + if (tmpsubj != NULL)
51838 + task->acl = tmpsubj;
51839 + }
51840 + if (tmpsubj == NULL)
51841 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
51842 + task->role);
51843 + if (task->acl) {
51844 + task->is_writable = 0;
51845 + /* ignore additional mmap checks for processes that are writable
51846 + by the default ACL */
51847 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51848 + if (unlikely(obj->mode & GR_WRITE))
51849 + task->is_writable = 1;
51850 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
51851 + if (unlikely(obj->mode & GR_WRITE))
51852 + task->is_writable = 1;
51853 +
51854 + gr_set_proc_res(task);
51855 +
51856 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51857 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51858 +#endif
51859 + } else {
51860 + return 1;
51861 + }
51862 +
51863 + return 0;
51864 +}
51865 +
51866 +int
51867 +gr_set_acls(const int type)
51868 +{
51869 + struct task_struct *task, *task2;
51870 + struct acl_role_label *role = current->role;
51871 + __u16 acl_role_id = current->acl_role_id;
51872 + const struct cred *cred;
51873 + int ret;
51874 +
51875 + rcu_read_lock();
51876 + read_lock(&tasklist_lock);
51877 + read_lock(&grsec_exec_file_lock);
51878 + do_each_thread(task2, task) {
51879 + /* check to see if we're called from the exit handler,
51880 + if so, only replace ACLs that have inherited the admin
51881 + ACL */
51882 +
51883 + if (type && (task->role != role ||
51884 + task->acl_role_id != acl_role_id))
51885 + continue;
51886 +
51887 + task->acl_role_id = 0;
51888 + task->acl_sp_role = 0;
51889 +
51890 + if (task->exec_file) {
51891 + cred = __task_cred(task);
51892 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
51893 + ret = gr_apply_subject_to_task(task);
51894 + if (ret) {
51895 + read_unlock(&grsec_exec_file_lock);
51896 + read_unlock(&tasklist_lock);
51897 + rcu_read_unlock();
51898 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
51899 + return ret;
51900 + }
51901 + } else {
51902 + // it's a kernel process
51903 + task->role = kernel_role;
51904 + task->acl = kernel_role->root_label;
51905 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
51906 + task->acl->mode &= ~GR_PROCFIND;
51907 +#endif
51908 + }
51909 + } while_each_thread(task2, task);
51910 + read_unlock(&grsec_exec_file_lock);
51911 + read_unlock(&tasklist_lock);
51912 + rcu_read_unlock();
51913 +
51914 + return 0;
51915 +}
51916 +
51917 +void
51918 +gr_learn_resource(const struct task_struct *task,
51919 + const int res, const unsigned long wanted, const int gt)
51920 +{
51921 + struct acl_subject_label *acl;
51922 + const struct cred *cred;
51923 +
51924 + if (unlikely((gr_status & GR_READY) &&
51925 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
51926 + goto skip_reslog;
51927 +
51928 +#ifdef CONFIG_GRKERNSEC_RESLOG
51929 + gr_log_resource(task, res, wanted, gt);
51930 +#endif
51931 + skip_reslog:
51932 +
51933 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
51934 + return;
51935 +
51936 + acl = task->acl;
51937 +
51938 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
51939 + !(acl->resmask & (1 << (unsigned short) res))))
51940 + return;
51941 +
51942 + if (wanted >= acl->res[res].rlim_cur) {
51943 + unsigned long res_add;
51944 +
51945 + res_add = wanted;
51946 + switch (res) {
51947 + case RLIMIT_CPU:
51948 + res_add += GR_RLIM_CPU_BUMP;
51949 + break;
51950 + case RLIMIT_FSIZE:
51951 + res_add += GR_RLIM_FSIZE_BUMP;
51952 + break;
51953 + case RLIMIT_DATA:
51954 + res_add += GR_RLIM_DATA_BUMP;
51955 + break;
51956 + case RLIMIT_STACK:
51957 + res_add += GR_RLIM_STACK_BUMP;
51958 + break;
51959 + case RLIMIT_CORE:
51960 + res_add += GR_RLIM_CORE_BUMP;
51961 + break;
51962 + case RLIMIT_RSS:
51963 + res_add += GR_RLIM_RSS_BUMP;
51964 + break;
51965 + case RLIMIT_NPROC:
51966 + res_add += GR_RLIM_NPROC_BUMP;
51967 + break;
51968 + case RLIMIT_NOFILE:
51969 + res_add += GR_RLIM_NOFILE_BUMP;
51970 + break;
51971 + case RLIMIT_MEMLOCK:
51972 + res_add += GR_RLIM_MEMLOCK_BUMP;
51973 + break;
51974 + case RLIMIT_AS:
51975 + res_add += GR_RLIM_AS_BUMP;
51976 + break;
51977 + case RLIMIT_LOCKS:
51978 + res_add += GR_RLIM_LOCKS_BUMP;
51979 + break;
51980 + case RLIMIT_SIGPENDING:
51981 + res_add += GR_RLIM_SIGPENDING_BUMP;
51982 + break;
51983 + case RLIMIT_MSGQUEUE:
51984 + res_add += GR_RLIM_MSGQUEUE_BUMP;
51985 + break;
51986 + case RLIMIT_NICE:
51987 + res_add += GR_RLIM_NICE_BUMP;
51988 + break;
51989 + case RLIMIT_RTPRIO:
51990 + res_add += GR_RLIM_RTPRIO_BUMP;
51991 + break;
51992 + case RLIMIT_RTTIME:
51993 + res_add += GR_RLIM_RTTIME_BUMP;
51994 + break;
51995 + }
51996 +
51997 + acl->res[res].rlim_cur = res_add;
51998 +
51999 + if (wanted > acl->res[res].rlim_max)
52000 + acl->res[res].rlim_max = res_add;
52001 +
52002 + /* only log the subject filename, since resource logging is supported for
52003 + single-subject learning only */
52004 + rcu_read_lock();
52005 + cred = __task_cred(task);
52006 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52007 + task->role->roletype, cred->uid, cred->gid, acl->filename,
52008 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
52009 + "", (unsigned long) res, &task->signal->saved_ip);
52010 + rcu_read_unlock();
52011 + }
52012 +
52013 + return;
52014 +}
52015 +
52016 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
52017 +void
52018 +pax_set_initial_flags(struct linux_binprm *bprm)
52019 +{
52020 + struct task_struct *task = current;
52021 + struct acl_subject_label *proc;
52022 + unsigned long flags;
52023 +
52024 + if (unlikely(!(gr_status & GR_READY)))
52025 + return;
52026 +
52027 + flags = pax_get_flags(task);
52028 +
52029 + proc = task->acl;
52030 +
52031 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
52032 + flags &= ~MF_PAX_PAGEEXEC;
52033 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
52034 + flags &= ~MF_PAX_SEGMEXEC;
52035 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
52036 + flags &= ~MF_PAX_RANDMMAP;
52037 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
52038 + flags &= ~MF_PAX_EMUTRAMP;
52039 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
52040 + flags &= ~MF_PAX_MPROTECT;
52041 +
52042 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
52043 + flags |= MF_PAX_PAGEEXEC;
52044 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
52045 + flags |= MF_PAX_SEGMEXEC;
52046 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
52047 + flags |= MF_PAX_RANDMMAP;
52048 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
52049 + flags |= MF_PAX_EMUTRAMP;
52050 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
52051 + flags |= MF_PAX_MPROTECT;
52052 +
52053 + pax_set_flags(task, flags);
52054 +
52055 + return;
52056 +}
52057 +#endif
52058 +
52059 +#ifdef CONFIG_SYSCTL
52060 +/* Eric Biederman likes breaking userland ABI and every inode-based security
52061 + system to save 35kb of memory */
52062 +
52063 +/* we modify the passed in filename, but adjust it back before returning */
52064 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
52065 +{
52066 + struct name_entry *nmatch;
52067 + char *p, *lastp = NULL;
52068 + struct acl_object_label *obj = NULL, *tmp;
52069 + struct acl_subject_label *tmpsubj;
52070 + char c = '\0';
52071 +
52072 + read_lock(&gr_inode_lock);
52073 +
52074 + p = name + len - 1;
52075 + do {
52076 + nmatch = lookup_name_entry(name);
52077 + if (lastp != NULL)
52078 + *lastp = c;
52079 +
52080 + if (nmatch == NULL)
52081 + goto next_component;
52082 + tmpsubj = current->acl;
52083 + do {
52084 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
52085 + if (obj != NULL) {
52086 + tmp = obj->globbed;
52087 + while (tmp) {
52088 + if (!glob_match(tmp->filename, name)) {
52089 + obj = tmp;
52090 + goto found_obj;
52091 + }
52092 + tmp = tmp->next;
52093 + }
52094 + goto found_obj;
52095 + }
52096 + } while ((tmpsubj = tmpsubj->parent_subject));
52097 +next_component:
52098 + /* end case */
52099 + if (p == name)
52100 + break;
52101 +
52102 + while (*p != '/')
52103 + p--;
52104 + if (p == name)
52105 + lastp = p + 1;
52106 + else {
52107 + lastp = p;
52108 + p--;
52109 + }
52110 + c = *lastp;
52111 + *lastp = '\0';
52112 + } while (1);
52113 +found_obj:
52114 + read_unlock(&gr_inode_lock);
52115 + /* obj returned will always be non-null */
52116 + return obj;
52117 +}
52118 +
52119 +/* returns 0 when allowing, non-zero on error
52120 + op of 0 is used for readdir, so we don't log the names of hidden files
52121 +*/
52122 +__u32
52123 +gr_handle_sysctl(const struct ctl_table *table, const int op)
52124 +{
52125 + struct ctl_table *tmp;
52126 + const char *proc_sys = "/proc/sys";
52127 + char *path;
52128 + struct acl_object_label *obj;
52129 + unsigned short len = 0, pos = 0, depth = 0, i;
52130 + __u32 err = 0;
52131 + __u32 mode = 0;
52132 +
52133 + if (unlikely(!(gr_status & GR_READY)))
52134 + return 0;
52135 +
52136 + /* for now, ignore operations on non-sysctl entries if it's not a
52137 + readdir*/
52138 + if (table->child != NULL && op != 0)
52139 + return 0;
52140 +
52141 + mode |= GR_FIND;
52142 + /* it's only a read if it's an entry, read on dirs is for readdir */
52143 + if (op & MAY_READ)
52144 + mode |= GR_READ;
52145 + if (op & MAY_WRITE)
52146 + mode |= GR_WRITE;
52147 +
52148 + preempt_disable();
52149 +
52150 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
52151 +
52152 + /* it's only a read/write if it's an actual entry, not a dir
52153 + (which are opened for readdir)
52154 + */
52155 +
52156 + /* convert the requested sysctl entry into a pathname */
52157 +
52158 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
52159 + len += strlen(tmp->procname);
52160 + len++;
52161 + depth++;
52162 + }
52163 +
52164 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
52165 + /* deny */
52166 + goto out;
52167 + }
52168 +
52169 + memset(path, 0, PAGE_SIZE);
52170 +
52171 + memcpy(path, proc_sys, strlen(proc_sys));
52172 +
52173 + pos += strlen(proc_sys);
52174 +
52175 + for (; depth > 0; depth--) {
52176 + path[pos] = '/';
52177 + pos++;
52178 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
52179 + if (depth == i) {
52180 + memcpy(path + pos, tmp->procname,
52181 + strlen(tmp->procname));
52182 + pos += strlen(tmp->procname);
52183 + }
52184 + i++;
52185 + }
52186 + }
52187 +
52188 + obj = gr_lookup_by_name(path, pos);
52189 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
52190 +
52191 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
52192 + ((err & mode) != mode))) {
52193 + __u32 new_mode = mode;
52194 +
52195 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52196 +
52197 + err = 0;
52198 + gr_log_learn_sysctl(path, new_mode);
52199 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
52200 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
52201 + err = -ENOENT;
52202 + } else if (!(err & GR_FIND)) {
52203 + err = -ENOENT;
52204 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
52205 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
52206 + path, (mode & GR_READ) ? " reading" : "",
52207 + (mode & GR_WRITE) ? " writing" : "");
52208 + err = -EACCES;
52209 + } else if ((err & mode) != mode) {
52210 + err = -EACCES;
52211 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
52212 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
52213 + path, (mode & GR_READ) ? " reading" : "",
52214 + (mode & GR_WRITE) ? " writing" : "");
52215 + err = 0;
52216 + } else
52217 + err = 0;
52218 +
52219 + out:
52220 + preempt_enable();
52221 +
52222 + return err;
52223 +}
52224 +#endif
52225 +
52226 +int
52227 +gr_handle_proc_ptrace(struct task_struct *task)
52228 +{
52229 + struct file *filp;
52230 + struct task_struct *tmp = task;
52231 + struct task_struct *curtemp = current;
52232 + __u32 retmode;
52233 +
52234 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52235 + if (unlikely(!(gr_status & GR_READY)))
52236 + return 0;
52237 +#endif
52238 +
52239 + read_lock(&tasklist_lock);
52240 + read_lock(&grsec_exec_file_lock);
52241 + filp = task->exec_file;
52242 +
52243 + while (tmp->pid > 0) {
52244 + if (tmp == curtemp)
52245 + break;
52246 + tmp = tmp->real_parent;
52247 + }
52248 +
52249 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52250 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
52251 + read_unlock(&grsec_exec_file_lock);
52252 + read_unlock(&tasklist_lock);
52253 + return 1;
52254 + }
52255 +
52256 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52257 + if (!(gr_status & GR_READY)) {
52258 + read_unlock(&grsec_exec_file_lock);
52259 + read_unlock(&tasklist_lock);
52260 + return 0;
52261 + }
52262 +#endif
52263 +
52264 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
52265 + read_unlock(&grsec_exec_file_lock);
52266 + read_unlock(&tasklist_lock);
52267 +
52268 + if (retmode & GR_NOPTRACE)
52269 + return 1;
52270 +
52271 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
52272 + && (current->acl != task->acl || (current->acl != current->role->root_label
52273 + && current->pid != task->pid)))
52274 + return 1;
52275 +
52276 + return 0;
52277 +}
52278 +
52279 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
52280 +{
52281 + if (unlikely(!(gr_status & GR_READY)))
52282 + return;
52283 +
52284 + if (!(current->role->roletype & GR_ROLE_GOD))
52285 + return;
52286 +
52287 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
52288 + p->role->rolename, gr_task_roletype_to_char(p),
52289 + p->acl->filename);
52290 +}
52291 +
52292 +int
52293 +gr_handle_ptrace(struct task_struct *task, const long request)
52294 +{
52295 + struct task_struct *tmp = task;
52296 + struct task_struct *curtemp = current;
52297 + __u32 retmode;
52298 +
52299 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52300 + if (unlikely(!(gr_status & GR_READY)))
52301 + return 0;
52302 +#endif
52303 +
52304 + read_lock(&tasklist_lock);
52305 + while (tmp->pid > 0) {
52306 + if (tmp == curtemp)
52307 + break;
52308 + tmp = tmp->real_parent;
52309 + }
52310 +
52311 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52312 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
52313 + read_unlock(&tasklist_lock);
52314 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52315 + return 1;
52316 + }
52317 + read_unlock(&tasklist_lock);
52318 +
52319 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52320 + if (!(gr_status & GR_READY))
52321 + return 0;
52322 +#endif
52323 +
52324 + read_lock(&grsec_exec_file_lock);
52325 + if (unlikely(!task->exec_file)) {
52326 + read_unlock(&grsec_exec_file_lock);
52327 + return 0;
52328 + }
52329 +
52330 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
52331 + read_unlock(&grsec_exec_file_lock);
52332 +
52333 + if (retmode & GR_NOPTRACE) {
52334 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52335 + return 1;
52336 + }
52337 +
52338 + if (retmode & GR_PTRACERD) {
52339 + switch (request) {
52340 + case PTRACE_SEIZE:
52341 + case PTRACE_POKETEXT:
52342 + case PTRACE_POKEDATA:
52343 + case PTRACE_POKEUSR:
52344 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
52345 + case PTRACE_SETREGS:
52346 + case PTRACE_SETFPREGS:
52347 +#endif
52348 +#ifdef CONFIG_X86
52349 + case PTRACE_SETFPXREGS:
52350 +#endif
52351 +#ifdef CONFIG_ALTIVEC
52352 + case PTRACE_SETVRREGS:
52353 +#endif
52354 + return 1;
52355 + default:
52356 + return 0;
52357 + }
52358 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
52359 + !(current->role->roletype & GR_ROLE_GOD) &&
52360 + (current->acl != task->acl)) {
52361 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52362 + return 1;
52363 + }
52364 +
52365 + return 0;
52366 +}
52367 +
52368 +static int is_writable_mmap(const struct file *filp)
52369 +{
52370 + struct task_struct *task = current;
52371 + struct acl_object_label *obj, *obj2;
52372 +
52373 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
52374 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
52375 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52376 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
52377 + task->role->root_label);
52378 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
52379 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
52380 + return 1;
52381 + }
52382 + }
52383 + return 0;
52384 +}
52385 +
52386 +int
52387 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
52388 +{
52389 + __u32 mode;
52390 +
52391 + if (unlikely(!file || !(prot & PROT_EXEC)))
52392 + return 1;
52393 +
52394 + if (is_writable_mmap(file))
52395 + return 0;
52396 +
52397 + mode =
52398 + gr_search_file(file->f_path.dentry,
52399 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52400 + file->f_path.mnt);
52401 +
52402 + if (!gr_tpe_allow(file))
52403 + return 0;
52404 +
52405 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52406 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52407 + return 0;
52408 + } else if (unlikely(!(mode & GR_EXEC))) {
52409 + return 0;
52410 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52411 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52412 + return 1;
52413 + }
52414 +
52415 + return 1;
52416 +}
52417 +
52418 +int
52419 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
52420 +{
52421 + __u32 mode;
52422 +
52423 + if (unlikely(!file || !(prot & PROT_EXEC)))
52424 + return 1;
52425 +
52426 + if (is_writable_mmap(file))
52427 + return 0;
52428 +
52429 + mode =
52430 + gr_search_file(file->f_path.dentry,
52431 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52432 + file->f_path.mnt);
52433 +
52434 + if (!gr_tpe_allow(file))
52435 + return 0;
52436 +
52437 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52438 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52439 + return 0;
52440 + } else if (unlikely(!(mode & GR_EXEC))) {
52441 + return 0;
52442 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52443 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52444 + return 1;
52445 + }
52446 +
52447 + return 1;
52448 +}
52449 +
52450 +void
52451 +gr_acl_handle_psacct(struct task_struct *task, const long code)
52452 +{
52453 + unsigned long runtime;
52454 + unsigned long cputime;
52455 + unsigned int wday, cday;
52456 + __u8 whr, chr;
52457 + __u8 wmin, cmin;
52458 + __u8 wsec, csec;
52459 + struct timespec timeval;
52460 +
52461 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
52462 + !(task->acl->mode & GR_PROCACCT)))
52463 + return;
52464 +
52465 + do_posix_clock_monotonic_gettime(&timeval);
52466 + runtime = timeval.tv_sec - task->start_time.tv_sec;
52467 + wday = runtime / (3600 * 24);
52468 + runtime -= wday * (3600 * 24);
52469 + whr = runtime / 3600;
52470 + runtime -= whr * 3600;
52471 + wmin = runtime / 60;
52472 + runtime -= wmin * 60;
52473 + wsec = runtime;
52474 +
52475 + cputime = (task->utime + task->stime) / HZ;
52476 + cday = cputime / (3600 * 24);
52477 + cputime -= cday * (3600 * 24);
52478 + chr = cputime / 3600;
52479 + cputime -= chr * 3600;
52480 + cmin = cputime / 60;
52481 + cputime -= cmin * 60;
52482 + csec = cputime;
52483 +
52484 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
52485 +
52486 + return;
52487 +}
52488 +
52489 +void gr_set_kernel_label(struct task_struct *task)
52490 +{
52491 + if (gr_status & GR_READY) {
52492 + task->role = kernel_role;
52493 + task->acl = kernel_role->root_label;
52494 + }
52495 + return;
52496 +}
52497 +
52498 +#ifdef CONFIG_TASKSTATS
52499 +int gr_is_taskstats_denied(int pid)
52500 +{
52501 + struct task_struct *task;
52502 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52503 + const struct cred *cred;
52504 +#endif
52505 + int ret = 0;
52506 +
52507 + /* restrict taskstats viewing to un-chrooted root users
52508 + who have the 'view' subject flag if the RBAC system is enabled
52509 + */
52510 +
52511 + rcu_read_lock();
52512 + read_lock(&tasklist_lock);
52513 + task = find_task_by_vpid(pid);
52514 + if (task) {
52515 +#ifdef CONFIG_GRKERNSEC_CHROOT
52516 + if (proc_is_chrooted(task))
52517 + ret = -EACCES;
52518 +#endif
52519 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52520 + cred = __task_cred(task);
52521 +#ifdef CONFIG_GRKERNSEC_PROC_USER
52522 + if (cred->uid != 0)
52523 + ret = -EACCES;
52524 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52525 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
52526 + ret = -EACCES;
52527 +#endif
52528 +#endif
52529 + if (gr_status & GR_READY) {
52530 + if (!(task->acl->mode & GR_VIEW))
52531 + ret = -EACCES;
52532 + }
52533 + } else
52534 + ret = -ENOENT;
52535 +
52536 + read_unlock(&tasklist_lock);
52537 + rcu_read_unlock();
52538 +
52539 + return ret;
52540 +}
52541 +#endif
52542 +
52543 +/* AUXV entries are filled via a descendant of search_binary_handler
52544 + after we've already applied the subject for the target
52545 +*/
52546 +int gr_acl_enable_at_secure(void)
52547 +{
52548 + if (unlikely(!(gr_status & GR_READY)))
52549 + return 0;
52550 +
52551 + if (current->acl->mode & GR_ATSECURE)
52552 + return 1;
52553 +
52554 + return 0;
52555 +}
52556 +
52557 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
52558 +{
52559 + struct task_struct *task = current;
52560 + struct dentry *dentry = file->f_path.dentry;
52561 + struct vfsmount *mnt = file->f_path.mnt;
52562 + struct acl_object_label *obj, *tmp;
52563 + struct acl_subject_label *subj;
52564 + unsigned int bufsize;
52565 + int is_not_root;
52566 + char *path;
52567 + dev_t dev = __get_dev(dentry);
52568 +
52569 + if (unlikely(!(gr_status & GR_READY)))
52570 + return 1;
52571 +
52572 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52573 + return 1;
52574 +
52575 + /* ignore Eric Biederman */
52576 + if (IS_PRIVATE(dentry->d_inode))
52577 + return 1;
52578 +
52579 + subj = task->acl;
52580 + do {
52581 + obj = lookup_acl_obj_label(ino, dev, subj);
52582 + if (obj != NULL)
52583 + return (obj->mode & GR_FIND) ? 1 : 0;
52584 + } while ((subj = subj->parent_subject));
52585 +
52586 + /* this is purely an optimization since we're looking for an object
52587 + for the directory we're doing a readdir on
52588 + if it's possible for any globbed object to match the entry we're
52589 + filling into the directory, then the object we find here will be
52590 + an anchor point with attached globbed objects
52591 + */
52592 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
52593 + if (obj->globbed == NULL)
52594 + return (obj->mode & GR_FIND) ? 1 : 0;
52595 +
52596 + is_not_root = ((obj->filename[0] == '/') &&
52597 + (obj->filename[1] == '\0')) ? 0 : 1;
52598 + bufsize = PAGE_SIZE - namelen - is_not_root;
52599 +
52600 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
52601 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
52602 + return 1;
52603 +
52604 + preempt_disable();
52605 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52606 + bufsize);
52607 +
52608 + bufsize = strlen(path);
52609 +
52610 + /* if base is "/", don't append an additional slash */
52611 + if (is_not_root)
52612 + *(path + bufsize) = '/';
52613 + memcpy(path + bufsize + is_not_root, name, namelen);
52614 + *(path + bufsize + namelen + is_not_root) = '\0';
52615 +
52616 + tmp = obj->globbed;
52617 + while (tmp) {
52618 + if (!glob_match(tmp->filename, path)) {
52619 + preempt_enable();
52620 + return (tmp->mode & GR_FIND) ? 1 : 0;
52621 + }
52622 + tmp = tmp->next;
52623 + }
52624 + preempt_enable();
52625 + return (obj->mode & GR_FIND) ? 1 : 0;
52626 +}
52627 +
52628 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
52629 +EXPORT_SYMBOL(gr_acl_is_enabled);
52630 +#endif
52631 +EXPORT_SYMBOL(gr_learn_resource);
52632 +EXPORT_SYMBOL(gr_set_kernel_label);
52633 +#ifdef CONFIG_SECURITY
52634 +EXPORT_SYMBOL(gr_check_user_change);
52635 +EXPORT_SYMBOL(gr_check_group_change);
52636 +#endif
52637 +
52638 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
52639 new file mode 100644
52640 index 0000000..34fefda
52641 --- /dev/null
52642 +++ b/grsecurity/gracl_alloc.c
52643 @@ -0,0 +1,105 @@
52644 +#include <linux/kernel.h>
52645 +#include <linux/mm.h>
52646 +#include <linux/slab.h>
52647 +#include <linux/vmalloc.h>
52648 +#include <linux/gracl.h>
52649 +#include <linux/grsecurity.h>
52650 +
52651 +static unsigned long alloc_stack_next = 1;
52652 +static unsigned long alloc_stack_size = 1;
52653 +static void **alloc_stack;
52654 +
52655 +static __inline__ int
52656 +alloc_pop(void)
52657 +{
52658 + if (alloc_stack_next == 1)
52659 + return 0;
52660 +
52661 + kfree(alloc_stack[alloc_stack_next - 2]);
52662 +
52663 + alloc_stack_next--;
52664 +
52665 + return 1;
52666 +}
52667 +
52668 +static __inline__ int
52669 +alloc_push(void *buf)
52670 +{
52671 + if (alloc_stack_next >= alloc_stack_size)
52672 + return 1;
52673 +
52674 + alloc_stack[alloc_stack_next - 1] = buf;
52675 +
52676 + alloc_stack_next++;
52677 +
52678 + return 0;
52679 +}
52680 +
52681 +void *
52682 +acl_alloc(unsigned long len)
52683 +{
52684 + void *ret = NULL;
52685 +
52686 + if (!len || len > PAGE_SIZE)
52687 + goto out;
52688 +
52689 + ret = kmalloc(len, GFP_KERNEL);
52690 +
52691 + if (ret) {
52692 + if (alloc_push(ret)) {
52693 + kfree(ret);
52694 + ret = NULL;
52695 + }
52696 + }
52697 +
52698 +out:
52699 + return ret;
52700 +}
52701 +
52702 +void *
52703 +acl_alloc_num(unsigned long num, unsigned long len)
52704 +{
52705 + if (!len || (num > (PAGE_SIZE / len)))
52706 + return NULL;
52707 +
52708 + return acl_alloc(num * len);
52709 +}
52710 +
52711 +void
52712 +acl_free_all(void)
52713 +{
52714 + if (gr_acl_is_enabled() || !alloc_stack)
52715 + return;
52716 +
52717 + while (alloc_pop()) ;
52718 +
52719 + if (alloc_stack) {
52720 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
52721 + kfree(alloc_stack);
52722 + else
52723 + vfree(alloc_stack);
52724 + }
52725 +
52726 + alloc_stack = NULL;
52727 + alloc_stack_size = 1;
52728 + alloc_stack_next = 1;
52729 +
52730 + return;
52731 +}
52732 +
52733 +int
52734 +acl_alloc_stack_init(unsigned long size)
52735 +{
52736 + if ((size * sizeof (void *)) <= PAGE_SIZE)
52737 + alloc_stack =
52738 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
52739 + else
52740 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
52741 +
52742 + alloc_stack_size = size;
52743 +
52744 + if (!alloc_stack)
52745 + return 0;
52746 + else
52747 + return 1;
52748 +}
52749 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
52750 new file mode 100644
52751 index 0000000..955ddfb
52752 --- /dev/null
52753 +++ b/grsecurity/gracl_cap.c
52754 @@ -0,0 +1,101 @@
52755 +#include <linux/kernel.h>
52756 +#include <linux/module.h>
52757 +#include <linux/sched.h>
52758 +#include <linux/gracl.h>
52759 +#include <linux/grsecurity.h>
52760 +#include <linux/grinternal.h>
52761 +
52762 +extern const char *captab_log[];
52763 +extern int captab_log_entries;
52764 +
52765 +int
52766 +gr_acl_is_capable(const int cap)
52767 +{
52768 + struct task_struct *task = current;
52769 + const struct cred *cred = current_cred();
52770 + struct acl_subject_label *curracl;
52771 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52772 + kernel_cap_t cap_audit = __cap_empty_set;
52773 +
52774 + if (!gr_acl_is_enabled())
52775 + return 1;
52776 +
52777 + curracl = task->acl;
52778 +
52779 + cap_drop = curracl->cap_lower;
52780 + cap_mask = curracl->cap_mask;
52781 + cap_audit = curracl->cap_invert_audit;
52782 +
52783 + while ((curracl = curracl->parent_subject)) {
52784 + /* if the cap isn't specified in the current computed mask but is specified in the
52785 + current level subject, and is lowered in the current level subject, then add
52786 + it to the set of dropped capabilities
52787 + otherwise, add the current level subject's mask to the current computed mask
52788 + */
52789 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52790 + cap_raise(cap_mask, cap);
52791 + if (cap_raised(curracl->cap_lower, cap))
52792 + cap_raise(cap_drop, cap);
52793 + if (cap_raised(curracl->cap_invert_audit, cap))
52794 + cap_raise(cap_audit, cap);
52795 + }
52796 + }
52797 +
52798 + if (!cap_raised(cap_drop, cap)) {
52799 + if (cap_raised(cap_audit, cap))
52800 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
52801 + return 1;
52802 + }
52803 +
52804 + curracl = task->acl;
52805 +
52806 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
52807 + && cap_raised(cred->cap_effective, cap)) {
52808 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52809 + task->role->roletype, cred->uid,
52810 + cred->gid, task->exec_file ?
52811 + gr_to_filename(task->exec_file->f_path.dentry,
52812 + task->exec_file->f_path.mnt) : curracl->filename,
52813 + curracl->filename, 0UL,
52814 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
52815 + return 1;
52816 + }
52817 +
52818 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
52819 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
52820 + return 0;
52821 +}
52822 +
52823 +int
52824 +gr_acl_is_capable_nolog(const int cap)
52825 +{
52826 + struct acl_subject_label *curracl;
52827 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52828 +
52829 + if (!gr_acl_is_enabled())
52830 + return 1;
52831 +
52832 + curracl = current->acl;
52833 +
52834 + cap_drop = curracl->cap_lower;
52835 + cap_mask = curracl->cap_mask;
52836 +
52837 + while ((curracl = curracl->parent_subject)) {
52838 + /* if the cap isn't specified in the current computed mask but is specified in the
52839 + current level subject, and is lowered in the current level subject, then add
52840 + it to the set of dropped capabilities
52841 + otherwise, add the current level subject's mask to the current computed mask
52842 + */
52843 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52844 + cap_raise(cap_mask, cap);
52845 + if (cap_raised(curracl->cap_lower, cap))
52846 + cap_raise(cap_drop, cap);
52847 + }
52848 + }
52849 +
52850 + if (!cap_raised(cap_drop, cap))
52851 + return 1;
52852 +
52853 + return 0;
52854 +}
52855 +
52856 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
52857 new file mode 100644
52858 index 0000000..88d0e87
52859 --- /dev/null
52860 +++ b/grsecurity/gracl_fs.c
52861 @@ -0,0 +1,435 @@
52862 +#include <linux/kernel.h>
52863 +#include <linux/sched.h>
52864 +#include <linux/types.h>
52865 +#include <linux/fs.h>
52866 +#include <linux/file.h>
52867 +#include <linux/stat.h>
52868 +#include <linux/grsecurity.h>
52869 +#include <linux/grinternal.h>
52870 +#include <linux/gracl.h>
52871 +
52872 +umode_t
52873 +gr_acl_umask(void)
52874 +{
52875 + if (unlikely(!gr_acl_is_enabled()))
52876 + return 0;
52877 +
52878 + return current->role->umask;
52879 +}
52880 +
52881 +__u32
52882 +gr_acl_handle_hidden_file(const struct dentry * dentry,
52883 + const struct vfsmount * mnt)
52884 +{
52885 + __u32 mode;
52886 +
52887 + if (unlikely(!dentry->d_inode))
52888 + return GR_FIND;
52889 +
52890 + mode =
52891 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
52892 +
52893 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
52894 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52895 + return mode;
52896 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
52897 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52898 + return 0;
52899 + } else if (unlikely(!(mode & GR_FIND)))
52900 + return 0;
52901 +
52902 + return GR_FIND;
52903 +}
52904 +
52905 +__u32
52906 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52907 + int acc_mode)
52908 +{
52909 + __u32 reqmode = GR_FIND;
52910 + __u32 mode;
52911 +
52912 + if (unlikely(!dentry->d_inode))
52913 + return reqmode;
52914 +
52915 + if (acc_mode & MAY_APPEND)
52916 + reqmode |= GR_APPEND;
52917 + else if (acc_mode & MAY_WRITE)
52918 + reqmode |= GR_WRITE;
52919 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
52920 + reqmode |= GR_READ;
52921 +
52922 + mode =
52923 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52924 + mnt);
52925 +
52926 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52927 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52928 + reqmode & GR_READ ? " reading" : "",
52929 + reqmode & GR_WRITE ? " writing" : reqmode &
52930 + GR_APPEND ? " appending" : "");
52931 + return reqmode;
52932 + } else
52933 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52934 + {
52935 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52936 + reqmode & GR_READ ? " reading" : "",
52937 + reqmode & GR_WRITE ? " writing" : reqmode &
52938 + GR_APPEND ? " appending" : "");
52939 + return 0;
52940 + } else if (unlikely((mode & reqmode) != reqmode))
52941 + return 0;
52942 +
52943 + return reqmode;
52944 +}
52945 +
52946 +__u32
52947 +gr_acl_handle_creat(const struct dentry * dentry,
52948 + const struct dentry * p_dentry,
52949 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
52950 + const int imode)
52951 +{
52952 + __u32 reqmode = GR_WRITE | GR_CREATE;
52953 + __u32 mode;
52954 +
52955 + if (acc_mode & MAY_APPEND)
52956 + reqmode |= GR_APPEND;
52957 + // if a directory was required or the directory already exists, then
52958 + // don't count this open as a read
52959 + if ((acc_mode & MAY_READ) &&
52960 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
52961 + reqmode |= GR_READ;
52962 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
52963 + reqmode |= GR_SETID;
52964 +
52965 + mode =
52966 + gr_check_create(dentry, p_dentry, p_mnt,
52967 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52968 +
52969 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52970 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52971 + reqmode & GR_READ ? " reading" : "",
52972 + reqmode & GR_WRITE ? " writing" : reqmode &
52973 + GR_APPEND ? " appending" : "");
52974 + return reqmode;
52975 + } else
52976 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52977 + {
52978 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52979 + reqmode & GR_READ ? " reading" : "",
52980 + reqmode & GR_WRITE ? " writing" : reqmode &
52981 + GR_APPEND ? " appending" : "");
52982 + return 0;
52983 + } else if (unlikely((mode & reqmode) != reqmode))
52984 + return 0;
52985 +
52986 + return reqmode;
52987 +}
52988 +
52989 +__u32
52990 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
52991 + const int fmode)
52992 +{
52993 + __u32 mode, reqmode = GR_FIND;
52994 +
52995 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
52996 + reqmode |= GR_EXEC;
52997 + if (fmode & S_IWOTH)
52998 + reqmode |= GR_WRITE;
52999 + if (fmode & S_IROTH)
53000 + reqmode |= GR_READ;
53001 +
53002 + mode =
53003 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
53004 + mnt);
53005 +
53006 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
53007 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
53008 + reqmode & GR_READ ? " reading" : "",
53009 + reqmode & GR_WRITE ? " writing" : "",
53010 + reqmode & GR_EXEC ? " executing" : "");
53011 + return reqmode;
53012 + } else
53013 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
53014 + {
53015 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
53016 + reqmode & GR_READ ? " reading" : "",
53017 + reqmode & GR_WRITE ? " writing" : "",
53018 + reqmode & GR_EXEC ? " executing" : "");
53019 + return 0;
53020 + } else if (unlikely((mode & reqmode) != reqmode))
53021 + return 0;
53022 +
53023 + return reqmode;
53024 +}
53025 +
53026 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
53027 +{
53028 + __u32 mode;
53029 +
53030 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
53031 +
53032 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
53033 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
53034 + return mode;
53035 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
53036 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
53037 + return 0;
53038 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
53039 + return 0;
53040 +
53041 + return (reqmode);
53042 +}
53043 +
53044 +__u32
53045 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53046 +{
53047 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
53048 +}
53049 +
53050 +__u32
53051 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
53052 +{
53053 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
53054 +}
53055 +
53056 +__u32
53057 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
53058 +{
53059 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
53060 +}
53061 +
53062 +__u32
53063 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
53064 +{
53065 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
53066 +}
53067 +
53068 +__u32
53069 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
53070 + umode_t *modeptr)
53071 +{
53072 + umode_t mode;
53073 +
53074 + *modeptr &= ~gr_acl_umask();
53075 + mode = *modeptr;
53076 +
53077 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
53078 + return 1;
53079 +
53080 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
53081 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
53082 + GR_CHMOD_ACL_MSG);
53083 + } else {
53084 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
53085 + }
53086 +}
53087 +
53088 +__u32
53089 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
53090 +{
53091 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
53092 +}
53093 +
53094 +__u32
53095 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
53096 +{
53097 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
53098 +}
53099 +
53100 +__u32
53101 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
53102 +{
53103 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
53104 +}
53105 +
53106 +__u32
53107 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
53108 +{
53109 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
53110 + GR_UNIXCONNECT_ACL_MSG);
53111 +}
53112 +
53113 +/* hardlinks require at minimum create and link permission,
53114 + any additional privilege required is based on the
53115 + privilege of the file being linked to
53116 +*/
53117 +__u32
53118 +gr_acl_handle_link(const struct dentry * new_dentry,
53119 + const struct dentry * parent_dentry,
53120 + const struct vfsmount * parent_mnt,
53121 + const struct dentry * old_dentry,
53122 + const struct vfsmount * old_mnt, const char *to)
53123 +{
53124 + __u32 mode;
53125 + __u32 needmode = GR_CREATE | GR_LINK;
53126 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
53127 +
53128 + mode =
53129 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
53130 + old_mnt);
53131 +
53132 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
53133 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
53134 + return mode;
53135 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
53136 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
53137 + return 0;
53138 + } else if (unlikely((mode & needmode) != needmode))
53139 + return 0;
53140 +
53141 + return 1;
53142 +}
53143 +
53144 +__u32
53145 +gr_acl_handle_symlink(const struct dentry * new_dentry,
53146 + const struct dentry * parent_dentry,
53147 + const struct vfsmount * parent_mnt, const char *from)
53148 +{
53149 + __u32 needmode = GR_WRITE | GR_CREATE;
53150 + __u32 mode;
53151 +
53152 + mode =
53153 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
53154 + GR_CREATE | GR_AUDIT_CREATE |
53155 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
53156 +
53157 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
53158 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
53159 + return mode;
53160 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
53161 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
53162 + return 0;
53163 + } else if (unlikely((mode & needmode) != needmode))
53164 + return 0;
53165 +
53166 + return (GR_WRITE | GR_CREATE);
53167 +}
53168 +
53169 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
53170 +{
53171 + __u32 mode;
53172 +
53173 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
53174 +
53175 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
53176 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
53177 + return mode;
53178 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
53179 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
53180 + return 0;
53181 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
53182 + return 0;
53183 +
53184 + return (reqmode);
53185 +}
53186 +
53187 +__u32
53188 +gr_acl_handle_mknod(const struct dentry * new_dentry,
53189 + const struct dentry * parent_dentry,
53190 + const struct vfsmount * parent_mnt,
53191 + const int mode)
53192 +{
53193 + __u32 reqmode = GR_WRITE | GR_CREATE;
53194 + if (unlikely(mode & (S_ISUID | S_ISGID)))
53195 + reqmode |= GR_SETID;
53196 +
53197 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53198 + reqmode, GR_MKNOD_ACL_MSG);
53199 +}
53200 +
53201 +__u32
53202 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
53203 + const struct dentry *parent_dentry,
53204 + const struct vfsmount *parent_mnt)
53205 +{
53206 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53207 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
53208 +}
53209 +
53210 +#define RENAME_CHECK_SUCCESS(old, new) \
53211 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
53212 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
53213 +
53214 +int
53215 +gr_acl_handle_rename(struct dentry *new_dentry,
53216 + struct dentry *parent_dentry,
53217 + const struct vfsmount *parent_mnt,
53218 + struct dentry *old_dentry,
53219 + struct inode *old_parent_inode,
53220 + struct vfsmount *old_mnt, const char *newname)
53221 +{
53222 + __u32 comp1, comp2;
53223 + int error = 0;
53224 +
53225 + if (unlikely(!gr_acl_is_enabled()))
53226 + return 0;
53227 +
53228 + if (!new_dentry->d_inode) {
53229 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
53230 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
53231 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
53232 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
53233 + GR_DELETE | GR_AUDIT_DELETE |
53234 + GR_AUDIT_READ | GR_AUDIT_WRITE |
53235 + GR_SUPPRESS, old_mnt);
53236 + } else {
53237 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
53238 + GR_CREATE | GR_DELETE |
53239 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
53240 + GR_AUDIT_READ | GR_AUDIT_WRITE |
53241 + GR_SUPPRESS, parent_mnt);
53242 + comp2 =
53243 + gr_search_file(old_dentry,
53244 + GR_READ | GR_WRITE | GR_AUDIT_READ |
53245 + GR_DELETE | GR_AUDIT_DELETE |
53246 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
53247 + }
53248 +
53249 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
53250 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
53251 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53252 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
53253 + && !(comp2 & GR_SUPPRESS)) {
53254 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53255 + error = -EACCES;
53256 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
53257 + error = -EACCES;
53258 +
53259 + return error;
53260 +}
53261 +
53262 +void
53263 +gr_acl_handle_exit(void)
53264 +{
53265 + u16 id;
53266 + char *rolename;
53267 + struct file *exec_file;
53268 +
53269 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
53270 + !(current->role->roletype & GR_ROLE_PERSIST))) {
53271 + id = current->acl_role_id;
53272 + rolename = current->role->rolename;
53273 + gr_set_acls(1);
53274 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
53275 + }
53276 +
53277 + write_lock(&grsec_exec_file_lock);
53278 + exec_file = current->exec_file;
53279 + current->exec_file = NULL;
53280 + write_unlock(&grsec_exec_file_lock);
53281 +
53282 + if (exec_file)
53283 + fput(exec_file);
53284 +}
53285 +
53286 +int
53287 +gr_acl_handle_procpidmem(const struct task_struct *task)
53288 +{
53289 + if (unlikely(!gr_acl_is_enabled()))
53290 + return 0;
53291 +
53292 + if (task != current && task->acl->mode & GR_PROTPROCFD)
53293 + return -EACCES;
53294 +
53295 + return 0;
53296 +}
53297 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
53298 new file mode 100644
53299 index 0000000..17050ca
53300 --- /dev/null
53301 +++ b/grsecurity/gracl_ip.c
53302 @@ -0,0 +1,381 @@
53303 +#include <linux/kernel.h>
53304 +#include <asm/uaccess.h>
53305 +#include <asm/errno.h>
53306 +#include <net/sock.h>
53307 +#include <linux/file.h>
53308 +#include <linux/fs.h>
53309 +#include <linux/net.h>
53310 +#include <linux/in.h>
53311 +#include <linux/skbuff.h>
53312 +#include <linux/ip.h>
53313 +#include <linux/udp.h>
53314 +#include <linux/types.h>
53315 +#include <linux/sched.h>
53316 +#include <linux/netdevice.h>
53317 +#include <linux/inetdevice.h>
53318 +#include <linux/gracl.h>
53319 +#include <linux/grsecurity.h>
53320 +#include <linux/grinternal.h>
53321 +
53322 +#define GR_BIND 0x01
53323 +#define GR_CONNECT 0x02
53324 +#define GR_INVERT 0x04
53325 +#define GR_BINDOVERRIDE 0x08
53326 +#define GR_CONNECTOVERRIDE 0x10
53327 +#define GR_SOCK_FAMILY 0x20
53328 +
53329 +static const char * gr_protocols[IPPROTO_MAX] = {
53330 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
53331 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
53332 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
53333 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
53334 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
53335 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
53336 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
53337 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
53338 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
53339 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
53340 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
53341 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
53342 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
53343 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
53344 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
53345 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
53346 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
53347 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
53348 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
53349 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
53350 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
53351 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
53352 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
53353 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
53354 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
53355 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
53356 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
53357 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
53358 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
53359 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
53360 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
53361 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
53362 + };
53363 +
53364 +static const char * gr_socktypes[SOCK_MAX] = {
53365 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
53366 + "unknown:7", "unknown:8", "unknown:9", "packet"
53367 + };
53368 +
53369 +static const char * gr_sockfamilies[AF_MAX+1] = {
53370 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
53371 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
53372 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
53373 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
53374 + };
53375 +
53376 +const char *
53377 +gr_proto_to_name(unsigned char proto)
53378 +{
53379 + return gr_protocols[proto];
53380 +}
53381 +
53382 +const char *
53383 +gr_socktype_to_name(unsigned char type)
53384 +{
53385 + return gr_socktypes[type];
53386 +}
53387 +
53388 +const char *
53389 +gr_sockfamily_to_name(unsigned char family)
53390 +{
53391 + return gr_sockfamilies[family];
53392 +}
53393 +
53394 +int
53395 +gr_search_socket(const int domain, const int type, const int protocol)
53396 +{
53397 + struct acl_subject_label *curr;
53398 + const struct cred *cred = current_cred();
53399 +
53400 + if (unlikely(!gr_acl_is_enabled()))
53401 + goto exit;
53402 +
53403 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
53404 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
53405 + goto exit; // let the kernel handle it
53406 +
53407 + curr = current->acl;
53408 +
53409 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
53410 + /* the family is allowed, if this is PF_INET allow it only if
53411 + the extra sock type/protocol checks pass */
53412 + if (domain == PF_INET)
53413 + goto inet_check;
53414 + goto exit;
53415 + } else {
53416 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53417 + __u32 fakeip = 0;
53418 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53419 + current->role->roletype, cred->uid,
53420 + cred->gid, current->exec_file ?
53421 + gr_to_filename(current->exec_file->f_path.dentry,
53422 + current->exec_file->f_path.mnt) :
53423 + curr->filename, curr->filename,
53424 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
53425 + &current->signal->saved_ip);
53426 + goto exit;
53427 + }
53428 + goto exit_fail;
53429 + }
53430 +
53431 +inet_check:
53432 + /* the rest of this checking is for IPv4 only */
53433 + if (!curr->ips)
53434 + goto exit;
53435 +
53436 + if ((curr->ip_type & (1 << type)) &&
53437 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
53438 + goto exit;
53439 +
53440 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53441 + /* we don't place acls on raw sockets , and sometimes
53442 + dgram/ip sockets are opened for ioctl and not
53443 + bind/connect, so we'll fake a bind learn log */
53444 + if (type == SOCK_RAW || type == SOCK_PACKET) {
53445 + __u32 fakeip = 0;
53446 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53447 + current->role->roletype, cred->uid,
53448 + cred->gid, current->exec_file ?
53449 + gr_to_filename(current->exec_file->f_path.dentry,
53450 + current->exec_file->f_path.mnt) :
53451 + curr->filename, curr->filename,
53452 + &fakeip, 0, type,
53453 + protocol, GR_CONNECT, &current->signal->saved_ip);
53454 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
53455 + __u32 fakeip = 0;
53456 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53457 + current->role->roletype, cred->uid,
53458 + cred->gid, current->exec_file ?
53459 + gr_to_filename(current->exec_file->f_path.dentry,
53460 + current->exec_file->f_path.mnt) :
53461 + curr->filename, curr->filename,
53462 + &fakeip, 0, type,
53463 + protocol, GR_BIND, &current->signal->saved_ip);
53464 + }
53465 + /* we'll log when they use connect or bind */
53466 + goto exit;
53467 + }
53468 +
53469 +exit_fail:
53470 + if (domain == PF_INET)
53471 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
53472 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
53473 + else
53474 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
53475 + gr_socktype_to_name(type), protocol);
53476 +
53477 + return 0;
53478 +exit:
53479 + return 1;
53480 +}
53481 +
53482 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
53483 +{
53484 + if ((ip->mode & mode) &&
53485 + (ip_port >= ip->low) &&
53486 + (ip_port <= ip->high) &&
53487 + ((ntohl(ip_addr) & our_netmask) ==
53488 + (ntohl(our_addr) & our_netmask))
53489 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
53490 + && (ip->type & (1 << type))) {
53491 + if (ip->mode & GR_INVERT)
53492 + return 2; // specifically denied
53493 + else
53494 + return 1; // allowed
53495 + }
53496 +
53497 + return 0; // not specifically allowed, may continue parsing
53498 +}
53499 +
53500 +static int
53501 +gr_search_connectbind(const int full_mode, struct sock *sk,
53502 + struct sockaddr_in *addr, const int type)
53503 +{
53504 + char iface[IFNAMSIZ] = {0};
53505 + struct acl_subject_label *curr;
53506 + struct acl_ip_label *ip;
53507 + struct inet_sock *isk;
53508 + struct net_device *dev;
53509 + struct in_device *idev;
53510 + unsigned long i;
53511 + int ret;
53512 + int mode = full_mode & (GR_BIND | GR_CONNECT);
53513 + __u32 ip_addr = 0;
53514 + __u32 our_addr;
53515 + __u32 our_netmask;
53516 + char *p;
53517 + __u16 ip_port = 0;
53518 + const struct cred *cred = current_cred();
53519 +
53520 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
53521 + return 0;
53522 +
53523 + curr = current->acl;
53524 + isk = inet_sk(sk);
53525 +
53526 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
53527 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
53528 + addr->sin_addr.s_addr = curr->inaddr_any_override;
53529 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
53530 + struct sockaddr_in saddr;
53531 + int err;
53532 +
53533 + saddr.sin_family = AF_INET;
53534 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
53535 + saddr.sin_port = isk->inet_sport;
53536 +
53537 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53538 + if (err)
53539 + return err;
53540 +
53541 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53542 + if (err)
53543 + return err;
53544 + }
53545 +
53546 + if (!curr->ips)
53547 + return 0;
53548 +
53549 + ip_addr = addr->sin_addr.s_addr;
53550 + ip_port = ntohs(addr->sin_port);
53551 +
53552 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53553 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53554 + current->role->roletype, cred->uid,
53555 + cred->gid, current->exec_file ?
53556 + gr_to_filename(current->exec_file->f_path.dentry,
53557 + current->exec_file->f_path.mnt) :
53558 + curr->filename, curr->filename,
53559 + &ip_addr, ip_port, type,
53560 + sk->sk_protocol, mode, &current->signal->saved_ip);
53561 + return 0;
53562 + }
53563 +
53564 + for (i = 0; i < curr->ip_num; i++) {
53565 + ip = *(curr->ips + i);
53566 + if (ip->iface != NULL) {
53567 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
53568 + p = strchr(iface, ':');
53569 + if (p != NULL)
53570 + *p = '\0';
53571 + dev = dev_get_by_name(sock_net(sk), iface);
53572 + if (dev == NULL)
53573 + continue;
53574 + idev = in_dev_get(dev);
53575 + if (idev == NULL) {
53576 + dev_put(dev);
53577 + continue;
53578 + }
53579 + rcu_read_lock();
53580 + for_ifa(idev) {
53581 + if (!strcmp(ip->iface, ifa->ifa_label)) {
53582 + our_addr = ifa->ifa_address;
53583 + our_netmask = 0xffffffff;
53584 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53585 + if (ret == 1) {
53586 + rcu_read_unlock();
53587 + in_dev_put(idev);
53588 + dev_put(dev);
53589 + return 0;
53590 + } else if (ret == 2) {
53591 + rcu_read_unlock();
53592 + in_dev_put(idev);
53593 + dev_put(dev);
53594 + goto denied;
53595 + }
53596 + }
53597 + } endfor_ifa(idev);
53598 + rcu_read_unlock();
53599 + in_dev_put(idev);
53600 + dev_put(dev);
53601 + } else {
53602 + our_addr = ip->addr;
53603 + our_netmask = ip->netmask;
53604 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53605 + if (ret == 1)
53606 + return 0;
53607 + else if (ret == 2)
53608 + goto denied;
53609 + }
53610 + }
53611 +
53612 +denied:
53613 + if (mode == GR_BIND)
53614 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53615 + else if (mode == GR_CONNECT)
53616 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53617 +
53618 + return -EACCES;
53619 +}
53620 +
53621 +int
53622 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
53623 +{
53624 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
53625 +}
53626 +
53627 +int
53628 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
53629 +{
53630 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
53631 +}
53632 +
53633 +int gr_search_listen(struct socket *sock)
53634 +{
53635 + struct sock *sk = sock->sk;
53636 + struct sockaddr_in addr;
53637 +
53638 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53639 + addr.sin_port = inet_sk(sk)->inet_sport;
53640 +
53641 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53642 +}
53643 +
53644 +int gr_search_accept(struct socket *sock)
53645 +{
53646 + struct sock *sk = sock->sk;
53647 + struct sockaddr_in addr;
53648 +
53649 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53650 + addr.sin_port = inet_sk(sk)->inet_sport;
53651 +
53652 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53653 +}
53654 +
53655 +int
53656 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
53657 +{
53658 + if (addr)
53659 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
53660 + else {
53661 + struct sockaddr_in sin;
53662 + const struct inet_sock *inet = inet_sk(sk);
53663 +
53664 + sin.sin_addr.s_addr = inet->inet_daddr;
53665 + sin.sin_port = inet->inet_dport;
53666 +
53667 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53668 + }
53669 +}
53670 +
53671 +int
53672 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
53673 +{
53674 + struct sockaddr_in sin;
53675 +
53676 + if (unlikely(skb->len < sizeof (struct udphdr)))
53677 + return 0; // skip this packet
53678 +
53679 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
53680 + sin.sin_port = udp_hdr(skb)->source;
53681 +
53682 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53683 +}
53684 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
53685 new file mode 100644
53686 index 0000000..25f54ef
53687 --- /dev/null
53688 +++ b/grsecurity/gracl_learn.c
53689 @@ -0,0 +1,207 @@
53690 +#include <linux/kernel.h>
53691 +#include <linux/mm.h>
53692 +#include <linux/sched.h>
53693 +#include <linux/poll.h>
53694 +#include <linux/string.h>
53695 +#include <linux/file.h>
53696 +#include <linux/types.h>
53697 +#include <linux/vmalloc.h>
53698 +#include <linux/grinternal.h>
53699 +
53700 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
53701 + size_t count, loff_t *ppos);
53702 +extern int gr_acl_is_enabled(void);
53703 +
53704 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
53705 +static int gr_learn_attached;
53706 +
53707 +/* use a 512k buffer */
53708 +#define LEARN_BUFFER_SIZE (512 * 1024)
53709 +
53710 +static DEFINE_SPINLOCK(gr_learn_lock);
53711 +static DEFINE_MUTEX(gr_learn_user_mutex);
53712 +
53713 +/* we need to maintain two buffers, so that the kernel context of grlearn
53714 + uses a semaphore around the userspace copying, and the other kernel contexts
53715 + use a spinlock when copying into the buffer, since they cannot sleep
53716 +*/
53717 +static char *learn_buffer;
53718 +static char *learn_buffer_user;
53719 +static int learn_buffer_len;
53720 +static int learn_buffer_user_len;
53721 +
53722 +static ssize_t
53723 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
53724 +{
53725 + DECLARE_WAITQUEUE(wait, current);
53726 + ssize_t retval = 0;
53727 +
53728 + add_wait_queue(&learn_wait, &wait);
53729 + set_current_state(TASK_INTERRUPTIBLE);
53730 + do {
53731 + mutex_lock(&gr_learn_user_mutex);
53732 + spin_lock(&gr_learn_lock);
53733 + if (learn_buffer_len)
53734 + break;
53735 + spin_unlock(&gr_learn_lock);
53736 + mutex_unlock(&gr_learn_user_mutex);
53737 + if (file->f_flags & O_NONBLOCK) {
53738 + retval = -EAGAIN;
53739 + goto out;
53740 + }
53741 + if (signal_pending(current)) {
53742 + retval = -ERESTARTSYS;
53743 + goto out;
53744 + }
53745 +
53746 + schedule();
53747 + } while (1);
53748 +
53749 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
53750 + learn_buffer_user_len = learn_buffer_len;
53751 + retval = learn_buffer_len;
53752 + learn_buffer_len = 0;
53753 +
53754 + spin_unlock(&gr_learn_lock);
53755 +
53756 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
53757 + retval = -EFAULT;
53758 +
53759 + mutex_unlock(&gr_learn_user_mutex);
53760 +out:
53761 + set_current_state(TASK_RUNNING);
53762 + remove_wait_queue(&learn_wait, &wait);
53763 + return retval;
53764 +}
53765 +
53766 +static unsigned int
53767 +poll_learn(struct file * file, poll_table * wait)
53768 +{
53769 + poll_wait(file, &learn_wait, wait);
53770 +
53771 + if (learn_buffer_len)
53772 + return (POLLIN | POLLRDNORM);
53773 +
53774 + return 0;
53775 +}
53776 +
53777 +void
53778 +gr_clear_learn_entries(void)
53779 +{
53780 + char *tmp;
53781 +
53782 + mutex_lock(&gr_learn_user_mutex);
53783 + spin_lock(&gr_learn_lock);
53784 + tmp = learn_buffer;
53785 + learn_buffer = NULL;
53786 + spin_unlock(&gr_learn_lock);
53787 + if (tmp)
53788 + vfree(tmp);
53789 + if (learn_buffer_user != NULL) {
53790 + vfree(learn_buffer_user);
53791 + learn_buffer_user = NULL;
53792 + }
53793 + learn_buffer_len = 0;
53794 + mutex_unlock(&gr_learn_user_mutex);
53795 +
53796 + return;
53797 +}
53798 +
53799 +void
53800 +gr_add_learn_entry(const char *fmt, ...)
53801 +{
53802 + va_list args;
53803 + unsigned int len;
53804 +
53805 + if (!gr_learn_attached)
53806 + return;
53807 +
53808 + spin_lock(&gr_learn_lock);
53809 +
53810 + /* leave a gap at the end so we know when it's "full" but don't have to
53811 + compute the exact length of the string we're trying to append
53812 + */
53813 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
53814 + spin_unlock(&gr_learn_lock);
53815 + wake_up_interruptible(&learn_wait);
53816 + return;
53817 + }
53818 + if (learn_buffer == NULL) {
53819 + spin_unlock(&gr_learn_lock);
53820 + return;
53821 + }
53822 +
53823 + va_start(args, fmt);
53824 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
53825 + va_end(args);
53826 +
53827 + learn_buffer_len += len + 1;
53828 +
53829 + spin_unlock(&gr_learn_lock);
53830 + wake_up_interruptible(&learn_wait);
53831 +
53832 + return;
53833 +}
53834 +
53835 +static int
53836 +open_learn(struct inode *inode, struct file *file)
53837 +{
53838 + if (file->f_mode & FMODE_READ && gr_learn_attached)
53839 + return -EBUSY;
53840 + if (file->f_mode & FMODE_READ) {
53841 + int retval = 0;
53842 + mutex_lock(&gr_learn_user_mutex);
53843 + if (learn_buffer == NULL)
53844 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
53845 + if (learn_buffer_user == NULL)
53846 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
53847 + if (learn_buffer == NULL) {
53848 + retval = -ENOMEM;
53849 + goto out_error;
53850 + }
53851 + if (learn_buffer_user == NULL) {
53852 + retval = -ENOMEM;
53853 + goto out_error;
53854 + }
53855 + learn_buffer_len = 0;
53856 + learn_buffer_user_len = 0;
53857 + gr_learn_attached = 1;
53858 +out_error:
53859 + mutex_unlock(&gr_learn_user_mutex);
53860 + return retval;
53861 + }
53862 + return 0;
53863 +}
53864 +
53865 +static int
53866 +close_learn(struct inode *inode, struct file *file)
53867 +{
53868 + if (file->f_mode & FMODE_READ) {
53869 + char *tmp = NULL;
53870 + mutex_lock(&gr_learn_user_mutex);
53871 + spin_lock(&gr_learn_lock);
53872 + tmp = learn_buffer;
53873 + learn_buffer = NULL;
53874 + spin_unlock(&gr_learn_lock);
53875 + if (tmp)
53876 + vfree(tmp);
53877 + if (learn_buffer_user != NULL) {
53878 + vfree(learn_buffer_user);
53879 + learn_buffer_user = NULL;
53880 + }
53881 + learn_buffer_len = 0;
53882 + learn_buffer_user_len = 0;
53883 + gr_learn_attached = 0;
53884 + mutex_unlock(&gr_learn_user_mutex);
53885 + }
53886 +
53887 + return 0;
53888 +}
53889 +
53890 +const struct file_operations grsec_fops = {
53891 + .read = read_learn,
53892 + .write = write_grsec_handler,
53893 + .open = open_learn,
53894 + .release = close_learn,
53895 + .poll = poll_learn,
53896 +};
53897 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
53898 new file mode 100644
53899 index 0000000..39645c9
53900 --- /dev/null
53901 +++ b/grsecurity/gracl_res.c
53902 @@ -0,0 +1,68 @@
53903 +#include <linux/kernel.h>
53904 +#include <linux/sched.h>
53905 +#include <linux/gracl.h>
53906 +#include <linux/grinternal.h>
53907 +
53908 +static const char *restab_log[] = {
53909 + [RLIMIT_CPU] = "RLIMIT_CPU",
53910 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
53911 + [RLIMIT_DATA] = "RLIMIT_DATA",
53912 + [RLIMIT_STACK] = "RLIMIT_STACK",
53913 + [RLIMIT_CORE] = "RLIMIT_CORE",
53914 + [RLIMIT_RSS] = "RLIMIT_RSS",
53915 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
53916 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
53917 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
53918 + [RLIMIT_AS] = "RLIMIT_AS",
53919 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
53920 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
53921 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
53922 + [RLIMIT_NICE] = "RLIMIT_NICE",
53923 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
53924 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
53925 + [GR_CRASH_RES] = "RLIMIT_CRASH"
53926 +};
53927 +
53928 +void
53929 +gr_log_resource(const struct task_struct *task,
53930 + const int res, const unsigned long wanted, const int gt)
53931 +{
53932 + const struct cred *cred;
53933 + unsigned long rlim;
53934 +
53935 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
53936 + return;
53937 +
53938 + // not yet supported resource
53939 + if (unlikely(!restab_log[res]))
53940 + return;
53941 +
53942 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
53943 + rlim = task_rlimit_max(task, res);
53944 + else
53945 + rlim = task_rlimit(task, res);
53946 +
53947 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
53948 + return;
53949 +
53950 + rcu_read_lock();
53951 + cred = __task_cred(task);
53952 +
53953 + if (res == RLIMIT_NPROC &&
53954 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
53955 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
53956 + goto out_rcu_unlock;
53957 + else if (res == RLIMIT_MEMLOCK &&
53958 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
53959 + goto out_rcu_unlock;
53960 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
53961 + goto out_rcu_unlock;
53962 + rcu_read_unlock();
53963 +
53964 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
53965 +
53966 + return;
53967 +out_rcu_unlock:
53968 + rcu_read_unlock();
53969 + return;
53970 +}
53971 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
53972 new file mode 100644
53973 index 0000000..5556be3
53974 --- /dev/null
53975 +++ b/grsecurity/gracl_segv.c
53976 @@ -0,0 +1,299 @@
53977 +#include <linux/kernel.h>
53978 +#include <linux/mm.h>
53979 +#include <asm/uaccess.h>
53980 +#include <asm/errno.h>
53981 +#include <asm/mman.h>
53982 +#include <net/sock.h>
53983 +#include <linux/file.h>
53984 +#include <linux/fs.h>
53985 +#include <linux/net.h>
53986 +#include <linux/in.h>
53987 +#include <linux/slab.h>
53988 +#include <linux/types.h>
53989 +#include <linux/sched.h>
53990 +#include <linux/timer.h>
53991 +#include <linux/gracl.h>
53992 +#include <linux/grsecurity.h>
53993 +#include <linux/grinternal.h>
53994 +
53995 +static struct crash_uid *uid_set;
53996 +static unsigned short uid_used;
53997 +static DEFINE_SPINLOCK(gr_uid_lock);
53998 +extern rwlock_t gr_inode_lock;
53999 +extern struct acl_subject_label *
54000 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
54001 + struct acl_role_label *role);
54002 +
54003 +#ifdef CONFIG_BTRFS_FS
54004 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
54005 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
54006 +#endif
54007 +
54008 +static inline dev_t __get_dev(const struct dentry *dentry)
54009 +{
54010 +#ifdef CONFIG_BTRFS_FS
54011 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
54012 + return get_btrfs_dev_from_inode(dentry->d_inode);
54013 + else
54014 +#endif
54015 + return dentry->d_inode->i_sb->s_dev;
54016 +}
54017 +
54018 +int
54019 +gr_init_uidset(void)
54020 +{
54021 + uid_set =
54022 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
54023 + uid_used = 0;
54024 +
54025 + return uid_set ? 1 : 0;
54026 +}
54027 +
54028 +void
54029 +gr_free_uidset(void)
54030 +{
54031 + if (uid_set)
54032 + kfree(uid_set);
54033 +
54034 + return;
54035 +}
54036 +
54037 +int
54038 +gr_find_uid(const uid_t uid)
54039 +{
54040 + struct crash_uid *tmp = uid_set;
54041 + uid_t buid;
54042 + int low = 0, high = uid_used - 1, mid;
54043 +
54044 + while (high >= low) {
54045 + mid = (low + high) >> 1;
54046 + buid = tmp[mid].uid;
54047 + if (buid == uid)
54048 + return mid;
54049 + if (buid > uid)
54050 + high = mid - 1;
54051 + if (buid < uid)
54052 + low = mid + 1;
54053 + }
54054 +
54055 + return -1;
54056 +}
54057 +
54058 +static __inline__ void
54059 +gr_insertsort(void)
54060 +{
54061 + unsigned short i, j;
54062 + struct crash_uid index;
54063 +
54064 + for (i = 1; i < uid_used; i++) {
54065 + index = uid_set[i];
54066 + j = i;
54067 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
54068 + uid_set[j] = uid_set[j - 1];
54069 + j--;
54070 + }
54071 + uid_set[j] = index;
54072 + }
54073 +
54074 + return;
54075 +}
54076 +
54077 +static __inline__ void
54078 +gr_insert_uid(const uid_t uid, const unsigned long expires)
54079 +{
54080 + int loc;
54081 +
54082 + if (uid_used == GR_UIDTABLE_MAX)
54083 + return;
54084 +
54085 + loc = gr_find_uid(uid);
54086 +
54087 + if (loc >= 0) {
54088 + uid_set[loc].expires = expires;
54089 + return;
54090 + }
54091 +
54092 + uid_set[uid_used].uid = uid;
54093 + uid_set[uid_used].expires = expires;
54094 + uid_used++;
54095 +
54096 + gr_insertsort();
54097 +
54098 + return;
54099 +}
54100 +
54101 +void
54102 +gr_remove_uid(const unsigned short loc)
54103 +{
54104 + unsigned short i;
54105 +
54106 + for (i = loc + 1; i < uid_used; i++)
54107 + uid_set[i - 1] = uid_set[i];
54108 +
54109 + uid_used--;
54110 +
54111 + return;
54112 +}
54113 +
54114 +int
54115 +gr_check_crash_uid(const uid_t uid)
54116 +{
54117 + int loc;
54118 + int ret = 0;
54119 +
54120 + if (unlikely(!gr_acl_is_enabled()))
54121 + return 0;
54122 +
54123 + spin_lock(&gr_uid_lock);
54124 + loc = gr_find_uid(uid);
54125 +
54126 + if (loc < 0)
54127 + goto out_unlock;
54128 +
54129 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
54130 + gr_remove_uid(loc);
54131 + else
54132 + ret = 1;
54133 +
54134 +out_unlock:
54135 + spin_unlock(&gr_uid_lock);
54136 + return ret;
54137 +}
54138 +
54139 +static __inline__ int
54140 +proc_is_setxid(const struct cred *cred)
54141 +{
54142 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
54143 + cred->uid != cred->fsuid)
54144 + return 1;
54145 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
54146 + cred->gid != cred->fsgid)
54147 + return 1;
54148 +
54149 + return 0;
54150 +}
54151 +
54152 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
54153 +
54154 +void
54155 +gr_handle_crash(struct task_struct *task, const int sig)
54156 +{
54157 + struct acl_subject_label *curr;
54158 + struct task_struct *tsk, *tsk2;
54159 + const struct cred *cred;
54160 + const struct cred *cred2;
54161 +
54162 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
54163 + return;
54164 +
54165 + if (unlikely(!gr_acl_is_enabled()))
54166 + return;
54167 +
54168 + curr = task->acl;
54169 +
54170 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
54171 + return;
54172 +
54173 + if (time_before_eq(curr->expires, get_seconds())) {
54174 + curr->expires = 0;
54175 + curr->crashes = 0;
54176 + }
54177 +
54178 + curr->crashes++;
54179 +
54180 + if (!curr->expires)
54181 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
54182 +
54183 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
54184 + time_after(curr->expires, get_seconds())) {
54185 + rcu_read_lock();
54186 + cred = __task_cred(task);
54187 + if (cred->uid && proc_is_setxid(cred)) {
54188 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
54189 + spin_lock(&gr_uid_lock);
54190 + gr_insert_uid(cred->uid, curr->expires);
54191 + spin_unlock(&gr_uid_lock);
54192 + curr->expires = 0;
54193 + curr->crashes = 0;
54194 + read_lock(&tasklist_lock);
54195 + do_each_thread(tsk2, tsk) {
54196 + cred2 = __task_cred(tsk);
54197 + if (tsk != task && cred2->uid == cred->uid)
54198 + gr_fake_force_sig(SIGKILL, tsk);
54199 + } while_each_thread(tsk2, tsk);
54200 + read_unlock(&tasklist_lock);
54201 + } else {
54202 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
54203 + read_lock(&tasklist_lock);
54204 + read_lock(&grsec_exec_file_lock);
54205 + do_each_thread(tsk2, tsk) {
54206 + if (likely(tsk != task)) {
54207 + // if this thread has the same subject as the one that triggered
54208 + // RES_CRASH and it's the same binary, kill it
54209 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
54210 + gr_fake_force_sig(SIGKILL, tsk);
54211 + }
54212 + } while_each_thread(tsk2, tsk);
54213 + read_unlock(&grsec_exec_file_lock);
54214 + read_unlock(&tasklist_lock);
54215 + }
54216 + rcu_read_unlock();
54217 + }
54218 +
54219 + return;
54220 +}
54221 +
54222 +int
54223 +gr_check_crash_exec(const struct file *filp)
54224 +{
54225 + struct acl_subject_label *curr;
54226 +
54227 + if (unlikely(!gr_acl_is_enabled()))
54228 + return 0;
54229 +
54230 + read_lock(&gr_inode_lock);
54231 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
54232 + __get_dev(filp->f_path.dentry),
54233 + current->role);
54234 + read_unlock(&gr_inode_lock);
54235 +
54236 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
54237 + (!curr->crashes && !curr->expires))
54238 + return 0;
54239 +
54240 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
54241 + time_after(curr->expires, get_seconds()))
54242 + return 1;
54243 + else if (time_before_eq(curr->expires, get_seconds())) {
54244 + curr->crashes = 0;
54245 + curr->expires = 0;
54246 + }
54247 +
54248 + return 0;
54249 +}
54250 +
54251 +void
54252 +gr_handle_alertkill(struct task_struct *task)
54253 +{
54254 + struct acl_subject_label *curracl;
54255 + __u32 curr_ip;
54256 + struct task_struct *p, *p2;
54257 +
54258 + if (unlikely(!gr_acl_is_enabled()))
54259 + return;
54260 +
54261 + curracl = task->acl;
54262 + curr_ip = task->signal->curr_ip;
54263 +
54264 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
54265 + read_lock(&tasklist_lock);
54266 + do_each_thread(p2, p) {
54267 + if (p->signal->curr_ip == curr_ip)
54268 + gr_fake_force_sig(SIGKILL, p);
54269 + } while_each_thread(p2, p);
54270 + read_unlock(&tasklist_lock);
54271 + } else if (curracl->mode & GR_KILLPROC)
54272 + gr_fake_force_sig(SIGKILL, task);
54273 +
54274 + return;
54275 +}
54276 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
54277 new file mode 100644
54278 index 0000000..9d83a69
54279 --- /dev/null
54280 +++ b/grsecurity/gracl_shm.c
54281 @@ -0,0 +1,40 @@
54282 +#include <linux/kernel.h>
54283 +#include <linux/mm.h>
54284 +#include <linux/sched.h>
54285 +#include <linux/file.h>
54286 +#include <linux/ipc.h>
54287 +#include <linux/gracl.h>
54288 +#include <linux/grsecurity.h>
54289 +#include <linux/grinternal.h>
54290 +
54291 +int
54292 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54293 + const time_t shm_createtime, const uid_t cuid, const int shmid)
54294 +{
54295 + struct task_struct *task;
54296 +
54297 + if (!gr_acl_is_enabled())
54298 + return 1;
54299 +
54300 + rcu_read_lock();
54301 + read_lock(&tasklist_lock);
54302 +
54303 + task = find_task_by_vpid(shm_cprid);
54304 +
54305 + if (unlikely(!task))
54306 + task = find_task_by_vpid(shm_lapid);
54307 +
54308 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
54309 + (task->pid == shm_lapid)) &&
54310 + (task->acl->mode & GR_PROTSHM) &&
54311 + (task->acl != current->acl))) {
54312 + read_unlock(&tasklist_lock);
54313 + rcu_read_unlock();
54314 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
54315 + return 0;
54316 + }
54317 + read_unlock(&tasklist_lock);
54318 + rcu_read_unlock();
54319 +
54320 + return 1;
54321 +}
54322 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
54323 new file mode 100644
54324 index 0000000..bc0be01
54325 --- /dev/null
54326 +++ b/grsecurity/grsec_chdir.c
54327 @@ -0,0 +1,19 @@
54328 +#include <linux/kernel.h>
54329 +#include <linux/sched.h>
54330 +#include <linux/fs.h>
54331 +#include <linux/file.h>
54332 +#include <linux/grsecurity.h>
54333 +#include <linux/grinternal.h>
54334 +
54335 +void
54336 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
54337 +{
54338 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54339 + if ((grsec_enable_chdir && grsec_enable_group &&
54340 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
54341 + !grsec_enable_group)) {
54342 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
54343 + }
54344 +#endif
54345 + return;
54346 +}
54347 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
54348 new file mode 100644
54349 index 0000000..a2dc675
54350 --- /dev/null
54351 +++ b/grsecurity/grsec_chroot.c
54352 @@ -0,0 +1,351 @@
54353 +#include <linux/kernel.h>
54354 +#include <linux/module.h>
54355 +#include <linux/sched.h>
54356 +#include <linux/file.h>
54357 +#include <linux/fs.h>
54358 +#include <linux/mount.h>
54359 +#include <linux/types.h>
54360 +#include <linux/pid_namespace.h>
54361 +#include <linux/grsecurity.h>
54362 +#include <linux/grinternal.h>
54363 +
54364 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
54365 +{
54366 +#ifdef CONFIG_GRKERNSEC
54367 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
54368 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
54369 + task->gr_is_chrooted = 1;
54370 + else
54371 + task->gr_is_chrooted = 0;
54372 +
54373 + task->gr_chroot_dentry = path->dentry;
54374 +#endif
54375 + return;
54376 +}
54377 +
54378 +void gr_clear_chroot_entries(struct task_struct *task)
54379 +{
54380 +#ifdef CONFIG_GRKERNSEC
54381 + task->gr_is_chrooted = 0;
54382 + task->gr_chroot_dentry = NULL;
54383 +#endif
54384 + return;
54385 +}
54386 +
54387 +int
54388 +gr_handle_chroot_unix(const pid_t pid)
54389 +{
54390 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54391 + struct task_struct *p;
54392 +
54393 + if (unlikely(!grsec_enable_chroot_unix))
54394 + return 1;
54395 +
54396 + if (likely(!proc_is_chrooted(current)))
54397 + return 1;
54398 +
54399 + rcu_read_lock();
54400 + read_lock(&tasklist_lock);
54401 + p = find_task_by_vpid_unrestricted(pid);
54402 + if (unlikely(p && !have_same_root(current, p))) {
54403 + read_unlock(&tasklist_lock);
54404 + rcu_read_unlock();
54405 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
54406 + return 0;
54407 + }
54408 + read_unlock(&tasklist_lock);
54409 + rcu_read_unlock();
54410 +#endif
54411 + return 1;
54412 +}
54413 +
54414 +int
54415 +gr_handle_chroot_nice(void)
54416 +{
54417 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54418 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
54419 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
54420 + return -EPERM;
54421 + }
54422 +#endif
54423 + return 0;
54424 +}
54425 +
54426 +int
54427 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
54428 +{
54429 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54430 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
54431 + && proc_is_chrooted(current)) {
54432 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
54433 + return -EACCES;
54434 + }
54435 +#endif
54436 + return 0;
54437 +}
54438 +
54439 +int
54440 +gr_handle_chroot_rawio(const struct inode *inode)
54441 +{
54442 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54443 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
54444 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
54445 + return 1;
54446 +#endif
54447 + return 0;
54448 +}
54449 +
54450 +int
54451 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
54452 +{
54453 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54454 + struct task_struct *p;
54455 + int ret = 0;
54456 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
54457 + return ret;
54458 +
54459 + read_lock(&tasklist_lock);
54460 + do_each_pid_task(pid, type, p) {
54461 + if (!have_same_root(current, p)) {
54462 + ret = 1;
54463 + goto out;
54464 + }
54465 + } while_each_pid_task(pid, type, p);
54466 +out:
54467 + read_unlock(&tasklist_lock);
54468 + return ret;
54469 +#endif
54470 + return 0;
54471 +}
54472 +
54473 +int
54474 +gr_pid_is_chrooted(struct task_struct *p)
54475 +{
54476 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54477 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
54478 + return 0;
54479 +
54480 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
54481 + !have_same_root(current, p)) {
54482 + return 1;
54483 + }
54484 +#endif
54485 + return 0;
54486 +}
54487 +
54488 +EXPORT_SYMBOL(gr_pid_is_chrooted);
54489 +
54490 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
54491 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
54492 +{
54493 + struct path path, currentroot;
54494 + int ret = 0;
54495 +
54496 + path.dentry = (struct dentry *)u_dentry;
54497 + path.mnt = (struct vfsmount *)u_mnt;
54498 + get_fs_root(current->fs, &currentroot);
54499 + if (path_is_under(&path, &currentroot))
54500 + ret = 1;
54501 + path_put(&currentroot);
54502 +
54503 + return ret;
54504 +}
54505 +#endif
54506 +
54507 +int
54508 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
54509 +{
54510 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54511 + if (!grsec_enable_chroot_fchdir)
54512 + return 1;
54513 +
54514 + if (!proc_is_chrooted(current))
54515 + return 1;
54516 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
54517 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
54518 + return 0;
54519 + }
54520 +#endif
54521 + return 1;
54522 +}
54523 +
54524 +int
54525 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54526 + const time_t shm_createtime)
54527 +{
54528 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54529 + struct task_struct *p;
54530 + time_t starttime;
54531 +
54532 + if (unlikely(!grsec_enable_chroot_shmat))
54533 + return 1;
54534 +
54535 + if (likely(!proc_is_chrooted(current)))
54536 + return 1;
54537 +
54538 + rcu_read_lock();
54539 + read_lock(&tasklist_lock);
54540 +
54541 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
54542 + starttime = p->start_time.tv_sec;
54543 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
54544 + if (have_same_root(current, p)) {
54545 + goto allow;
54546 + } else {
54547 + read_unlock(&tasklist_lock);
54548 + rcu_read_unlock();
54549 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54550 + return 0;
54551 + }
54552 + }
54553 + /* creator exited, pid reuse, fall through to next check */
54554 + }
54555 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
54556 + if (unlikely(!have_same_root(current, p))) {
54557 + read_unlock(&tasklist_lock);
54558 + rcu_read_unlock();
54559 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54560 + return 0;
54561 + }
54562 + }
54563 +
54564 +allow:
54565 + read_unlock(&tasklist_lock);
54566 + rcu_read_unlock();
54567 +#endif
54568 + return 1;
54569 +}
54570 +
54571 +void
54572 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
54573 +{
54574 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54575 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
54576 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
54577 +#endif
54578 + return;
54579 +}
54580 +
54581 +int
54582 +gr_handle_chroot_mknod(const struct dentry *dentry,
54583 + const struct vfsmount *mnt, const int mode)
54584 +{
54585 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54586 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
54587 + proc_is_chrooted(current)) {
54588 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
54589 + return -EPERM;
54590 + }
54591 +#endif
54592 + return 0;
54593 +}
54594 +
54595 +int
54596 +gr_handle_chroot_mount(const struct dentry *dentry,
54597 + const struct vfsmount *mnt, const char *dev_name)
54598 +{
54599 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54600 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
54601 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
54602 + return -EPERM;
54603 + }
54604 +#endif
54605 + return 0;
54606 +}
54607 +
54608 +int
54609 +gr_handle_chroot_pivot(void)
54610 +{
54611 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54612 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
54613 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
54614 + return -EPERM;
54615 + }
54616 +#endif
54617 + return 0;
54618 +}
54619 +
54620 +int
54621 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
54622 +{
54623 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54624 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
54625 + !gr_is_outside_chroot(dentry, mnt)) {
54626 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
54627 + return -EPERM;
54628 + }
54629 +#endif
54630 + return 0;
54631 +}
54632 +
54633 +extern const char *captab_log[];
54634 +extern int captab_log_entries;
54635 +
54636 +int
54637 +gr_chroot_is_capable(const int cap)
54638 +{
54639 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54640 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54641 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54642 + if (cap_raised(chroot_caps, cap)) {
54643 + const struct cred *creds = current_cred();
54644 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
54645 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
54646 + }
54647 + return 0;
54648 + }
54649 + }
54650 +#endif
54651 + return 1;
54652 +}
54653 +
54654 +int
54655 +gr_chroot_is_capable_nolog(const int cap)
54656 +{
54657 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54658 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54659 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54660 + if (cap_raised(chroot_caps, cap)) {
54661 + return 0;
54662 + }
54663 + }
54664 +#endif
54665 + return 1;
54666 +}
54667 +
54668 +int
54669 +gr_handle_chroot_sysctl(const int op)
54670 +{
54671 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54672 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
54673 + proc_is_chrooted(current))
54674 + return -EACCES;
54675 +#endif
54676 + return 0;
54677 +}
54678 +
54679 +void
54680 +gr_handle_chroot_chdir(struct path *path)
54681 +{
54682 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54683 + if (grsec_enable_chroot_chdir)
54684 + set_fs_pwd(current->fs, path);
54685 +#endif
54686 + return;
54687 +}
54688 +
54689 +int
54690 +gr_handle_chroot_chmod(const struct dentry *dentry,
54691 + const struct vfsmount *mnt, const int mode)
54692 +{
54693 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54694 + /* allow chmod +s on directories, but not files */
54695 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
54696 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
54697 + proc_is_chrooted(current)) {
54698 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
54699 + return -EPERM;
54700 + }
54701 +#endif
54702 + return 0;
54703 +}
54704 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
54705 new file mode 100644
54706 index 0000000..213ad8b
54707 --- /dev/null
54708 +++ b/grsecurity/grsec_disabled.c
54709 @@ -0,0 +1,437 @@
54710 +#include <linux/kernel.h>
54711 +#include <linux/module.h>
54712 +#include <linux/sched.h>
54713 +#include <linux/file.h>
54714 +#include <linux/fs.h>
54715 +#include <linux/kdev_t.h>
54716 +#include <linux/net.h>
54717 +#include <linux/in.h>
54718 +#include <linux/ip.h>
54719 +#include <linux/skbuff.h>
54720 +#include <linux/sysctl.h>
54721 +
54722 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54723 +void
54724 +pax_set_initial_flags(struct linux_binprm *bprm)
54725 +{
54726 + return;
54727 +}
54728 +#endif
54729 +
54730 +#ifdef CONFIG_SYSCTL
54731 +__u32
54732 +gr_handle_sysctl(const struct ctl_table * table, const int op)
54733 +{
54734 + return 0;
54735 +}
54736 +#endif
54737 +
54738 +#ifdef CONFIG_TASKSTATS
54739 +int gr_is_taskstats_denied(int pid)
54740 +{
54741 + return 0;
54742 +}
54743 +#endif
54744 +
54745 +int
54746 +gr_acl_is_enabled(void)
54747 +{
54748 + return 0;
54749 +}
54750 +
54751 +void
54752 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54753 +{
54754 + return;
54755 +}
54756 +
54757 +int
54758 +gr_handle_rawio(const struct inode *inode)
54759 +{
54760 + return 0;
54761 +}
54762 +
54763 +void
54764 +gr_acl_handle_psacct(struct task_struct *task, const long code)
54765 +{
54766 + return;
54767 +}
54768 +
54769 +int
54770 +gr_handle_ptrace(struct task_struct *task, const long request)
54771 +{
54772 + return 0;
54773 +}
54774 +
54775 +int
54776 +gr_handle_proc_ptrace(struct task_struct *task)
54777 +{
54778 + return 0;
54779 +}
54780 +
54781 +void
54782 +gr_learn_resource(const struct task_struct *task,
54783 + const int res, const unsigned long wanted, const int gt)
54784 +{
54785 + return;
54786 +}
54787 +
54788 +int
54789 +gr_set_acls(const int type)
54790 +{
54791 + return 0;
54792 +}
54793 +
54794 +int
54795 +gr_check_hidden_task(const struct task_struct *tsk)
54796 +{
54797 + return 0;
54798 +}
54799 +
54800 +int
54801 +gr_check_protected_task(const struct task_struct *task)
54802 +{
54803 + return 0;
54804 +}
54805 +
54806 +int
54807 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
54808 +{
54809 + return 0;
54810 +}
54811 +
54812 +void
54813 +gr_copy_label(struct task_struct *tsk)
54814 +{
54815 + return;
54816 +}
54817 +
54818 +void
54819 +gr_set_pax_flags(struct task_struct *task)
54820 +{
54821 + return;
54822 +}
54823 +
54824 +int
54825 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54826 + const int unsafe_share)
54827 +{
54828 + return 0;
54829 +}
54830 +
54831 +void
54832 +gr_handle_delete(const ino_t ino, const dev_t dev)
54833 +{
54834 + return;
54835 +}
54836 +
54837 +void
54838 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54839 +{
54840 + return;
54841 +}
54842 +
54843 +void
54844 +gr_handle_crash(struct task_struct *task, const int sig)
54845 +{
54846 + return;
54847 +}
54848 +
54849 +int
54850 +gr_check_crash_exec(const struct file *filp)
54851 +{
54852 + return 0;
54853 +}
54854 +
54855 +int
54856 +gr_check_crash_uid(const uid_t uid)
54857 +{
54858 + return 0;
54859 +}
54860 +
54861 +void
54862 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54863 + struct dentry *old_dentry,
54864 + struct dentry *new_dentry,
54865 + struct vfsmount *mnt, const __u8 replace)
54866 +{
54867 + return;
54868 +}
54869 +
54870 +int
54871 +gr_search_socket(const int family, const int type, const int protocol)
54872 +{
54873 + return 1;
54874 +}
54875 +
54876 +int
54877 +gr_search_connectbind(const int mode, const struct socket *sock,
54878 + const struct sockaddr_in *addr)
54879 +{
54880 + return 0;
54881 +}
54882 +
54883 +void
54884 +gr_handle_alertkill(struct task_struct *task)
54885 +{
54886 + return;
54887 +}
54888 +
54889 +__u32
54890 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
54891 +{
54892 + return 1;
54893 +}
54894 +
54895 +__u32
54896 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54897 + const struct vfsmount * mnt)
54898 +{
54899 + return 1;
54900 +}
54901 +
54902 +__u32
54903 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54904 + int acc_mode)
54905 +{
54906 + return 1;
54907 +}
54908 +
54909 +__u32
54910 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54911 +{
54912 + return 1;
54913 +}
54914 +
54915 +__u32
54916 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
54917 +{
54918 + return 1;
54919 +}
54920 +
54921 +int
54922 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
54923 + unsigned int *vm_flags)
54924 +{
54925 + return 1;
54926 +}
54927 +
54928 +__u32
54929 +gr_acl_handle_truncate(const struct dentry * dentry,
54930 + const struct vfsmount * mnt)
54931 +{
54932 + return 1;
54933 +}
54934 +
54935 +__u32
54936 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
54937 +{
54938 + return 1;
54939 +}
54940 +
54941 +__u32
54942 +gr_acl_handle_access(const struct dentry * dentry,
54943 + const struct vfsmount * mnt, const int fmode)
54944 +{
54945 + return 1;
54946 +}
54947 +
54948 +__u32
54949 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
54950 + umode_t *mode)
54951 +{
54952 + return 1;
54953 +}
54954 +
54955 +__u32
54956 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
54957 +{
54958 + return 1;
54959 +}
54960 +
54961 +__u32
54962 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
54963 +{
54964 + return 1;
54965 +}
54966 +
54967 +void
54968 +grsecurity_init(void)
54969 +{
54970 + return;
54971 +}
54972 +
54973 +umode_t gr_acl_umask(void)
54974 +{
54975 + return 0;
54976 +}
54977 +
54978 +__u32
54979 +gr_acl_handle_mknod(const struct dentry * new_dentry,
54980 + const struct dentry * parent_dentry,
54981 + const struct vfsmount * parent_mnt,
54982 + const int mode)
54983 +{
54984 + return 1;
54985 +}
54986 +
54987 +__u32
54988 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
54989 + const struct dentry * parent_dentry,
54990 + const struct vfsmount * parent_mnt)
54991 +{
54992 + return 1;
54993 +}
54994 +
54995 +__u32
54996 +gr_acl_handle_symlink(const struct dentry * new_dentry,
54997 + const struct dentry * parent_dentry,
54998 + const struct vfsmount * parent_mnt, const char *from)
54999 +{
55000 + return 1;
55001 +}
55002 +
55003 +__u32
55004 +gr_acl_handle_link(const struct dentry * new_dentry,
55005 + const struct dentry * parent_dentry,
55006 + const struct vfsmount * parent_mnt,
55007 + const struct dentry * old_dentry,
55008 + const struct vfsmount * old_mnt, const char *to)
55009 +{
55010 + return 1;
55011 +}
55012 +
55013 +int
55014 +gr_acl_handle_rename(const struct dentry *new_dentry,
55015 + const struct dentry *parent_dentry,
55016 + const struct vfsmount *parent_mnt,
55017 + const struct dentry *old_dentry,
55018 + const struct inode *old_parent_inode,
55019 + const struct vfsmount *old_mnt, const char *newname)
55020 +{
55021 + return 0;
55022 +}
55023 +
55024 +int
55025 +gr_acl_handle_filldir(const struct file *file, const char *name,
55026 + const int namelen, const ino_t ino)
55027 +{
55028 + return 1;
55029 +}
55030 +
55031 +int
55032 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55033 + const time_t shm_createtime, const uid_t cuid, const int shmid)
55034 +{
55035 + return 1;
55036 +}
55037 +
55038 +int
55039 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
55040 +{
55041 + return 0;
55042 +}
55043 +
55044 +int
55045 +gr_search_accept(const struct socket *sock)
55046 +{
55047 + return 0;
55048 +}
55049 +
55050 +int
55051 +gr_search_listen(const struct socket *sock)
55052 +{
55053 + return 0;
55054 +}
55055 +
55056 +int
55057 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
55058 +{
55059 + return 0;
55060 +}
55061 +
55062 +__u32
55063 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
55064 +{
55065 + return 1;
55066 +}
55067 +
55068 +__u32
55069 +gr_acl_handle_creat(const struct dentry * dentry,
55070 + const struct dentry * p_dentry,
55071 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55072 + const int imode)
55073 +{
55074 + return 1;
55075 +}
55076 +
55077 +void
55078 +gr_acl_handle_exit(void)
55079 +{
55080 + return;
55081 +}
55082 +
55083 +int
55084 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
55085 +{
55086 + return 1;
55087 +}
55088 +
55089 +void
55090 +gr_set_role_label(const uid_t uid, const gid_t gid)
55091 +{
55092 + return;
55093 +}
55094 +
55095 +int
55096 +gr_acl_handle_procpidmem(const struct task_struct *task)
55097 +{
55098 + return 0;
55099 +}
55100 +
55101 +int
55102 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
55103 +{
55104 + return 0;
55105 +}
55106 +
55107 +int
55108 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
55109 +{
55110 + return 0;
55111 +}
55112 +
55113 +void
55114 +gr_set_kernel_label(struct task_struct *task)
55115 +{
55116 + return;
55117 +}
55118 +
55119 +int
55120 +gr_check_user_change(int real, int effective, int fs)
55121 +{
55122 + return 0;
55123 +}
55124 +
55125 +int
55126 +gr_check_group_change(int real, int effective, int fs)
55127 +{
55128 + return 0;
55129 +}
55130 +
55131 +int gr_acl_enable_at_secure(void)
55132 +{
55133 + return 0;
55134 +}
55135 +
55136 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
55137 +{
55138 + return dentry->d_inode->i_sb->s_dev;
55139 +}
55140 +
55141 +EXPORT_SYMBOL(gr_learn_resource);
55142 +EXPORT_SYMBOL(gr_set_kernel_label);
55143 +#ifdef CONFIG_SECURITY
55144 +EXPORT_SYMBOL(gr_check_user_change);
55145 +EXPORT_SYMBOL(gr_check_group_change);
55146 +#endif
55147 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
55148 new file mode 100644
55149 index 0000000..2b05ada
55150 --- /dev/null
55151 +++ b/grsecurity/grsec_exec.c
55152 @@ -0,0 +1,146 @@
55153 +#include <linux/kernel.h>
55154 +#include <linux/sched.h>
55155 +#include <linux/file.h>
55156 +#include <linux/binfmts.h>
55157 +#include <linux/fs.h>
55158 +#include <linux/types.h>
55159 +#include <linux/grdefs.h>
55160 +#include <linux/grsecurity.h>
55161 +#include <linux/grinternal.h>
55162 +#include <linux/capability.h>
55163 +#include <linux/module.h>
55164 +
55165 +#include <asm/uaccess.h>
55166 +
55167 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55168 +static char gr_exec_arg_buf[132];
55169 +static DEFINE_MUTEX(gr_exec_arg_mutex);
55170 +#endif
55171 +
55172 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
55173 +
55174 +void
55175 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
55176 +{
55177 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55178 + char *grarg = gr_exec_arg_buf;
55179 + unsigned int i, x, execlen = 0;
55180 + char c;
55181 +
55182 + if (!((grsec_enable_execlog && grsec_enable_group &&
55183 + in_group_p(grsec_audit_gid))
55184 + || (grsec_enable_execlog && !grsec_enable_group)))
55185 + return;
55186 +
55187 + mutex_lock(&gr_exec_arg_mutex);
55188 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
55189 +
55190 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
55191 + const char __user *p;
55192 + unsigned int len;
55193 +
55194 + p = get_user_arg_ptr(argv, i);
55195 + if (IS_ERR(p))
55196 + goto log;
55197 +
55198 + len = strnlen_user(p, 128 - execlen);
55199 + if (len > 128 - execlen)
55200 + len = 128 - execlen;
55201 + else if (len > 0)
55202 + len--;
55203 + if (copy_from_user(grarg + execlen, p, len))
55204 + goto log;
55205 +
55206 + /* rewrite unprintable characters */
55207 + for (x = 0; x < len; x++) {
55208 + c = *(grarg + execlen + x);
55209 + if (c < 32 || c > 126)
55210 + *(grarg + execlen + x) = ' ';
55211 + }
55212 +
55213 + execlen += len;
55214 + *(grarg + execlen) = ' ';
55215 + *(grarg + execlen + 1) = '\0';
55216 + execlen++;
55217 + }
55218 +
55219 + log:
55220 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
55221 + bprm->file->f_path.mnt, grarg);
55222 + mutex_unlock(&gr_exec_arg_mutex);
55223 +#endif
55224 + return;
55225 +}
55226 +
55227 +#ifdef CONFIG_GRKERNSEC
55228 +extern int gr_acl_is_capable(const int cap);
55229 +extern int gr_acl_is_capable_nolog(const int cap);
55230 +extern int gr_chroot_is_capable(const int cap);
55231 +extern int gr_chroot_is_capable_nolog(const int cap);
55232 +#endif
55233 +
55234 +const char *captab_log[] = {
55235 + "CAP_CHOWN",
55236 + "CAP_DAC_OVERRIDE",
55237 + "CAP_DAC_READ_SEARCH",
55238 + "CAP_FOWNER",
55239 + "CAP_FSETID",
55240 + "CAP_KILL",
55241 + "CAP_SETGID",
55242 + "CAP_SETUID",
55243 + "CAP_SETPCAP",
55244 + "CAP_LINUX_IMMUTABLE",
55245 + "CAP_NET_BIND_SERVICE",
55246 + "CAP_NET_BROADCAST",
55247 + "CAP_NET_ADMIN",
55248 + "CAP_NET_RAW",
55249 + "CAP_IPC_LOCK",
55250 + "CAP_IPC_OWNER",
55251 + "CAP_SYS_MODULE",
55252 + "CAP_SYS_RAWIO",
55253 + "CAP_SYS_CHROOT",
55254 + "CAP_SYS_PTRACE",
55255 + "CAP_SYS_PACCT",
55256 + "CAP_SYS_ADMIN",
55257 + "CAP_SYS_BOOT",
55258 + "CAP_SYS_NICE",
55259 + "CAP_SYS_RESOURCE",
55260 + "CAP_SYS_TIME",
55261 + "CAP_SYS_TTY_CONFIG",
55262 + "CAP_MKNOD",
55263 + "CAP_LEASE",
55264 + "CAP_AUDIT_WRITE",
55265 + "CAP_AUDIT_CONTROL",
55266 + "CAP_SETFCAP",
55267 + "CAP_MAC_OVERRIDE",
55268 + "CAP_MAC_ADMIN",
55269 + "CAP_SYSLOG",
55270 + "CAP_WAKE_ALARM"
55271 +};
55272 +
55273 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
55274 +
55275 +int gr_is_capable(const int cap)
55276 +{
55277 +#ifdef CONFIG_GRKERNSEC
55278 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
55279 + return 1;
55280 + return 0;
55281 +#else
55282 + return 1;
55283 +#endif
55284 +}
55285 +
55286 +int gr_is_capable_nolog(const int cap)
55287 +{
55288 +#ifdef CONFIG_GRKERNSEC
55289 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
55290 + return 1;
55291 + return 0;
55292 +#else
55293 + return 1;
55294 +#endif
55295 +}
55296 +
55297 +EXPORT_SYMBOL(gr_is_capable);
55298 +EXPORT_SYMBOL(gr_is_capable_nolog);
55299 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
55300 new file mode 100644
55301 index 0000000..d3ee748
55302 --- /dev/null
55303 +++ b/grsecurity/grsec_fifo.c
55304 @@ -0,0 +1,24 @@
55305 +#include <linux/kernel.h>
55306 +#include <linux/sched.h>
55307 +#include <linux/fs.h>
55308 +#include <linux/file.h>
55309 +#include <linux/grinternal.h>
55310 +
55311 +int
55312 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
55313 + const struct dentry *dir, const int flag, const int acc_mode)
55314 +{
55315 +#ifdef CONFIG_GRKERNSEC_FIFO
55316 + const struct cred *cred = current_cred();
55317 +
55318 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
55319 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
55320 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
55321 + (cred->fsuid != dentry->d_inode->i_uid)) {
55322 + if (!inode_permission(dentry->d_inode, acc_mode))
55323 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
55324 + return -EACCES;
55325 + }
55326 +#endif
55327 + return 0;
55328 +}
55329 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
55330 new file mode 100644
55331 index 0000000..8ca18bf
55332 --- /dev/null
55333 +++ b/grsecurity/grsec_fork.c
55334 @@ -0,0 +1,23 @@
55335 +#include <linux/kernel.h>
55336 +#include <linux/sched.h>
55337 +#include <linux/grsecurity.h>
55338 +#include <linux/grinternal.h>
55339 +#include <linux/errno.h>
55340 +
55341 +void
55342 +gr_log_forkfail(const int retval)
55343 +{
55344 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55345 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
55346 + switch (retval) {
55347 + case -EAGAIN:
55348 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
55349 + break;
55350 + case -ENOMEM:
55351 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
55352 + break;
55353 + }
55354 + }
55355 +#endif
55356 + return;
55357 +}
55358 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
55359 new file mode 100644
55360 index 0000000..01ddde4
55361 --- /dev/null
55362 +++ b/grsecurity/grsec_init.c
55363 @@ -0,0 +1,277 @@
55364 +#include <linux/kernel.h>
55365 +#include <linux/sched.h>
55366 +#include <linux/mm.h>
55367 +#include <linux/gracl.h>
55368 +#include <linux/slab.h>
55369 +#include <linux/vmalloc.h>
55370 +#include <linux/percpu.h>
55371 +#include <linux/module.h>
55372 +
55373 +int grsec_enable_ptrace_readexec;
55374 +int grsec_enable_setxid;
55375 +int grsec_enable_brute;
55376 +int grsec_enable_link;
55377 +int grsec_enable_dmesg;
55378 +int grsec_enable_harden_ptrace;
55379 +int grsec_enable_fifo;
55380 +int grsec_enable_execlog;
55381 +int grsec_enable_signal;
55382 +int grsec_enable_forkfail;
55383 +int grsec_enable_audit_ptrace;
55384 +int grsec_enable_time;
55385 +int grsec_enable_audit_textrel;
55386 +int grsec_enable_group;
55387 +int grsec_audit_gid;
55388 +int grsec_enable_chdir;
55389 +int grsec_enable_mount;
55390 +int grsec_enable_rofs;
55391 +int grsec_enable_chroot_findtask;
55392 +int grsec_enable_chroot_mount;
55393 +int grsec_enable_chroot_shmat;
55394 +int grsec_enable_chroot_fchdir;
55395 +int grsec_enable_chroot_double;
55396 +int grsec_enable_chroot_pivot;
55397 +int grsec_enable_chroot_chdir;
55398 +int grsec_enable_chroot_chmod;
55399 +int grsec_enable_chroot_mknod;
55400 +int grsec_enable_chroot_nice;
55401 +int grsec_enable_chroot_execlog;
55402 +int grsec_enable_chroot_caps;
55403 +int grsec_enable_chroot_sysctl;
55404 +int grsec_enable_chroot_unix;
55405 +int grsec_enable_tpe;
55406 +int grsec_tpe_gid;
55407 +int grsec_enable_blackhole;
55408 +#ifdef CONFIG_IPV6_MODULE
55409 +EXPORT_SYMBOL(grsec_enable_blackhole);
55410 +#endif
55411 +int grsec_lastack_retries;
55412 +int grsec_enable_tpe_all;
55413 +int grsec_enable_tpe_invert;
55414 +int grsec_enable_socket_all;
55415 +int grsec_socket_all_gid;
55416 +int grsec_enable_socket_client;
55417 +int grsec_socket_client_gid;
55418 +int grsec_enable_socket_server;
55419 +int grsec_socket_server_gid;
55420 +int grsec_resource_logging;
55421 +int grsec_disable_privio;
55422 +int grsec_enable_log_rwxmaps;
55423 +int grsec_lock;
55424 +
55425 +DEFINE_SPINLOCK(grsec_alert_lock);
55426 +unsigned long grsec_alert_wtime = 0;
55427 +unsigned long grsec_alert_fyet = 0;
55428 +
55429 +DEFINE_SPINLOCK(grsec_audit_lock);
55430 +
55431 +DEFINE_RWLOCK(grsec_exec_file_lock);
55432 +
55433 +char *gr_shared_page[4];
55434 +
55435 +char *gr_alert_log_fmt;
55436 +char *gr_audit_log_fmt;
55437 +char *gr_alert_log_buf;
55438 +char *gr_audit_log_buf;
55439 +
55440 +extern struct gr_arg *gr_usermode;
55441 +extern unsigned char *gr_system_salt;
55442 +extern unsigned char *gr_system_sum;
55443 +
55444 +void __init
55445 +grsecurity_init(void)
55446 +{
55447 + int j;
55448 + /* create the per-cpu shared pages */
55449 +
55450 +#ifdef CONFIG_X86
55451 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
55452 +#endif
55453 +
55454 + for (j = 0; j < 4; j++) {
55455 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
55456 + if (gr_shared_page[j] == NULL) {
55457 + panic("Unable to allocate grsecurity shared page");
55458 + return;
55459 + }
55460 + }
55461 +
55462 + /* allocate log buffers */
55463 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
55464 + if (!gr_alert_log_fmt) {
55465 + panic("Unable to allocate grsecurity alert log format buffer");
55466 + return;
55467 + }
55468 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
55469 + if (!gr_audit_log_fmt) {
55470 + panic("Unable to allocate grsecurity audit log format buffer");
55471 + return;
55472 + }
55473 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55474 + if (!gr_alert_log_buf) {
55475 + panic("Unable to allocate grsecurity alert log buffer");
55476 + return;
55477 + }
55478 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55479 + if (!gr_audit_log_buf) {
55480 + panic("Unable to allocate grsecurity audit log buffer");
55481 + return;
55482 + }
55483 +
55484 + /* allocate memory for authentication structure */
55485 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
55486 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
55487 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
55488 +
55489 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
55490 + panic("Unable to allocate grsecurity authentication structure");
55491 + return;
55492 + }
55493 +
55494 +
55495 +#ifdef CONFIG_GRKERNSEC_IO
55496 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
55497 + grsec_disable_privio = 1;
55498 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55499 + grsec_disable_privio = 1;
55500 +#else
55501 + grsec_disable_privio = 0;
55502 +#endif
55503 +#endif
55504 +
55505 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55506 + /* for backward compatibility, tpe_invert always defaults to on if
55507 + enabled in the kernel
55508 + */
55509 + grsec_enable_tpe_invert = 1;
55510 +#endif
55511 +
55512 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55513 +#ifndef CONFIG_GRKERNSEC_SYSCTL
55514 + grsec_lock = 1;
55515 +#endif
55516 +
55517 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55518 + grsec_enable_audit_textrel = 1;
55519 +#endif
55520 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55521 + grsec_enable_log_rwxmaps = 1;
55522 +#endif
55523 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55524 + grsec_enable_group = 1;
55525 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
55526 +#endif
55527 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55528 + grsec_enable_ptrace_readexec = 1;
55529 +#endif
55530 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55531 + grsec_enable_chdir = 1;
55532 +#endif
55533 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55534 + grsec_enable_harden_ptrace = 1;
55535 +#endif
55536 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55537 + grsec_enable_mount = 1;
55538 +#endif
55539 +#ifdef CONFIG_GRKERNSEC_LINK
55540 + grsec_enable_link = 1;
55541 +#endif
55542 +#ifdef CONFIG_GRKERNSEC_BRUTE
55543 + grsec_enable_brute = 1;
55544 +#endif
55545 +#ifdef CONFIG_GRKERNSEC_DMESG
55546 + grsec_enable_dmesg = 1;
55547 +#endif
55548 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55549 + grsec_enable_blackhole = 1;
55550 + grsec_lastack_retries = 4;
55551 +#endif
55552 +#ifdef CONFIG_GRKERNSEC_FIFO
55553 + grsec_enable_fifo = 1;
55554 +#endif
55555 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55556 + grsec_enable_execlog = 1;
55557 +#endif
55558 +#ifdef CONFIG_GRKERNSEC_SETXID
55559 + grsec_enable_setxid = 1;
55560 +#endif
55561 +#ifdef CONFIG_GRKERNSEC_SIGNAL
55562 + grsec_enable_signal = 1;
55563 +#endif
55564 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55565 + grsec_enable_forkfail = 1;
55566 +#endif
55567 +#ifdef CONFIG_GRKERNSEC_TIME
55568 + grsec_enable_time = 1;
55569 +#endif
55570 +#ifdef CONFIG_GRKERNSEC_RESLOG
55571 + grsec_resource_logging = 1;
55572 +#endif
55573 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55574 + grsec_enable_chroot_findtask = 1;
55575 +#endif
55576 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55577 + grsec_enable_chroot_unix = 1;
55578 +#endif
55579 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55580 + grsec_enable_chroot_mount = 1;
55581 +#endif
55582 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55583 + grsec_enable_chroot_fchdir = 1;
55584 +#endif
55585 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55586 + grsec_enable_chroot_shmat = 1;
55587 +#endif
55588 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55589 + grsec_enable_audit_ptrace = 1;
55590 +#endif
55591 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55592 + grsec_enable_chroot_double = 1;
55593 +#endif
55594 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55595 + grsec_enable_chroot_pivot = 1;
55596 +#endif
55597 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55598 + grsec_enable_chroot_chdir = 1;
55599 +#endif
55600 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55601 + grsec_enable_chroot_chmod = 1;
55602 +#endif
55603 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55604 + grsec_enable_chroot_mknod = 1;
55605 +#endif
55606 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55607 + grsec_enable_chroot_nice = 1;
55608 +#endif
55609 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55610 + grsec_enable_chroot_execlog = 1;
55611 +#endif
55612 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55613 + grsec_enable_chroot_caps = 1;
55614 +#endif
55615 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55616 + grsec_enable_chroot_sysctl = 1;
55617 +#endif
55618 +#ifdef CONFIG_GRKERNSEC_TPE
55619 + grsec_enable_tpe = 1;
55620 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
55621 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55622 + grsec_enable_tpe_all = 1;
55623 +#endif
55624 +#endif
55625 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55626 + grsec_enable_socket_all = 1;
55627 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
55628 +#endif
55629 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55630 + grsec_enable_socket_client = 1;
55631 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
55632 +#endif
55633 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55634 + grsec_enable_socket_server = 1;
55635 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
55636 +#endif
55637 +#endif
55638 +
55639 + return;
55640 +}
55641 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
55642 new file mode 100644
55643 index 0000000..3efe141
55644 --- /dev/null
55645 +++ b/grsecurity/grsec_link.c
55646 @@ -0,0 +1,43 @@
55647 +#include <linux/kernel.h>
55648 +#include <linux/sched.h>
55649 +#include <linux/fs.h>
55650 +#include <linux/file.h>
55651 +#include <linux/grinternal.h>
55652 +
55653 +int
55654 +gr_handle_follow_link(const struct inode *parent,
55655 + const struct inode *inode,
55656 + const struct dentry *dentry, const struct vfsmount *mnt)
55657 +{
55658 +#ifdef CONFIG_GRKERNSEC_LINK
55659 + const struct cred *cred = current_cred();
55660 +
55661 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
55662 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
55663 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
55664 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
55665 + return -EACCES;
55666 + }
55667 +#endif
55668 + return 0;
55669 +}
55670 +
55671 +int
55672 +gr_handle_hardlink(const struct dentry *dentry,
55673 + const struct vfsmount *mnt,
55674 + struct inode *inode, const int mode, const char *to)
55675 +{
55676 +#ifdef CONFIG_GRKERNSEC_LINK
55677 + const struct cred *cred = current_cred();
55678 +
55679 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
55680 + (!S_ISREG(mode) || (mode & S_ISUID) ||
55681 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
55682 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
55683 + !capable(CAP_FOWNER) && cred->uid) {
55684 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
55685 + return -EPERM;
55686 + }
55687 +#endif
55688 + return 0;
55689 +}
55690 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
55691 new file mode 100644
55692 index 0000000..a45d2e9
55693 --- /dev/null
55694 +++ b/grsecurity/grsec_log.c
55695 @@ -0,0 +1,322 @@
55696 +#include <linux/kernel.h>
55697 +#include <linux/sched.h>
55698 +#include <linux/file.h>
55699 +#include <linux/tty.h>
55700 +#include <linux/fs.h>
55701 +#include <linux/grinternal.h>
55702 +
55703 +#ifdef CONFIG_TREE_PREEMPT_RCU
55704 +#define DISABLE_PREEMPT() preempt_disable()
55705 +#define ENABLE_PREEMPT() preempt_enable()
55706 +#else
55707 +#define DISABLE_PREEMPT()
55708 +#define ENABLE_PREEMPT()
55709 +#endif
55710 +
55711 +#define BEGIN_LOCKS(x) \
55712 + DISABLE_PREEMPT(); \
55713 + rcu_read_lock(); \
55714 + read_lock(&tasklist_lock); \
55715 + read_lock(&grsec_exec_file_lock); \
55716 + if (x != GR_DO_AUDIT) \
55717 + spin_lock(&grsec_alert_lock); \
55718 + else \
55719 + spin_lock(&grsec_audit_lock)
55720 +
55721 +#define END_LOCKS(x) \
55722 + if (x != GR_DO_AUDIT) \
55723 + spin_unlock(&grsec_alert_lock); \
55724 + else \
55725 + spin_unlock(&grsec_audit_lock); \
55726 + read_unlock(&grsec_exec_file_lock); \
55727 + read_unlock(&tasklist_lock); \
55728 + rcu_read_unlock(); \
55729 + ENABLE_PREEMPT(); \
55730 + if (x == GR_DONT_AUDIT) \
55731 + gr_handle_alertkill(current)
55732 +
55733 +enum {
55734 + FLOODING,
55735 + NO_FLOODING
55736 +};
55737 +
55738 +extern char *gr_alert_log_fmt;
55739 +extern char *gr_audit_log_fmt;
55740 +extern char *gr_alert_log_buf;
55741 +extern char *gr_audit_log_buf;
55742 +
55743 +static int gr_log_start(int audit)
55744 +{
55745 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
55746 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
55747 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55748 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
55749 + unsigned long curr_secs = get_seconds();
55750 +
55751 + if (audit == GR_DO_AUDIT)
55752 + goto set_fmt;
55753 +
55754 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
55755 + grsec_alert_wtime = curr_secs;
55756 + grsec_alert_fyet = 0;
55757 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
55758 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
55759 + grsec_alert_fyet++;
55760 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
55761 + grsec_alert_wtime = curr_secs;
55762 + grsec_alert_fyet++;
55763 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
55764 + return FLOODING;
55765 + }
55766 + else return FLOODING;
55767 +
55768 +set_fmt:
55769 +#endif
55770 + memset(buf, 0, PAGE_SIZE);
55771 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
55772 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
55773 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55774 + } else if (current->signal->curr_ip) {
55775 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
55776 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
55777 + } else if (gr_acl_is_enabled()) {
55778 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
55779 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55780 + } else {
55781 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
55782 + strcpy(buf, fmt);
55783 + }
55784 +
55785 + return NO_FLOODING;
55786 +}
55787 +
55788 +static void gr_log_middle(int audit, const char *msg, va_list ap)
55789 + __attribute__ ((format (printf, 2, 0)));
55790 +
55791 +static void gr_log_middle(int audit, const char *msg, va_list ap)
55792 +{
55793 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55794 + unsigned int len = strlen(buf);
55795 +
55796 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55797 +
55798 + return;
55799 +}
55800 +
55801 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
55802 + __attribute__ ((format (printf, 2, 3)));
55803 +
55804 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
55805 +{
55806 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55807 + unsigned int len = strlen(buf);
55808 + va_list ap;
55809 +
55810 + va_start(ap, msg);
55811 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55812 + va_end(ap);
55813 +
55814 + return;
55815 +}
55816 +
55817 +static void gr_log_end(int audit, int append_default)
55818 +{
55819 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55820 +
55821 + if (append_default) {
55822 + unsigned int len = strlen(buf);
55823 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
55824 + }
55825 +
55826 + printk("%s\n", buf);
55827 +
55828 + return;
55829 +}
55830 +
55831 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
55832 +{
55833 + int logtype;
55834 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
55835 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
55836 + void *voidptr = NULL;
55837 + int num1 = 0, num2 = 0;
55838 + unsigned long ulong1 = 0, ulong2 = 0;
55839 + struct dentry *dentry = NULL;
55840 + struct vfsmount *mnt = NULL;
55841 + struct file *file = NULL;
55842 + struct task_struct *task = NULL;
55843 + const struct cred *cred, *pcred;
55844 + va_list ap;
55845 +
55846 + BEGIN_LOCKS(audit);
55847 + logtype = gr_log_start(audit);
55848 + if (logtype == FLOODING) {
55849 + END_LOCKS(audit);
55850 + return;
55851 + }
55852 + va_start(ap, argtypes);
55853 + switch (argtypes) {
55854 + case GR_TTYSNIFF:
55855 + task = va_arg(ap, struct task_struct *);
55856 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
55857 + break;
55858 + case GR_SYSCTL_HIDDEN:
55859 + str1 = va_arg(ap, char *);
55860 + gr_log_middle_varargs(audit, msg, result, str1);
55861 + break;
55862 + case GR_RBAC:
55863 + dentry = va_arg(ap, struct dentry *);
55864 + mnt = va_arg(ap, struct vfsmount *);
55865 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
55866 + break;
55867 + case GR_RBAC_STR:
55868 + dentry = va_arg(ap, struct dentry *);
55869 + mnt = va_arg(ap, struct vfsmount *);
55870 + str1 = va_arg(ap, char *);
55871 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
55872 + break;
55873 + case GR_STR_RBAC:
55874 + str1 = va_arg(ap, char *);
55875 + dentry = va_arg(ap, struct dentry *);
55876 + mnt = va_arg(ap, struct vfsmount *);
55877 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
55878 + break;
55879 + case GR_RBAC_MODE2:
55880 + dentry = va_arg(ap, struct dentry *);
55881 + mnt = va_arg(ap, struct vfsmount *);
55882 + str1 = va_arg(ap, char *);
55883 + str2 = va_arg(ap, char *);
55884 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
55885 + break;
55886 + case GR_RBAC_MODE3:
55887 + dentry = va_arg(ap, struct dentry *);
55888 + mnt = va_arg(ap, struct vfsmount *);
55889 + str1 = va_arg(ap, char *);
55890 + str2 = va_arg(ap, char *);
55891 + str3 = va_arg(ap, char *);
55892 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
55893 + break;
55894 + case GR_FILENAME:
55895 + dentry = va_arg(ap, struct dentry *);
55896 + mnt = va_arg(ap, struct vfsmount *);
55897 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
55898 + break;
55899 + case GR_STR_FILENAME:
55900 + str1 = va_arg(ap, char *);
55901 + dentry = va_arg(ap, struct dentry *);
55902 + mnt = va_arg(ap, struct vfsmount *);
55903 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
55904 + break;
55905 + case GR_FILENAME_STR:
55906 + dentry = va_arg(ap, struct dentry *);
55907 + mnt = va_arg(ap, struct vfsmount *);
55908 + str1 = va_arg(ap, char *);
55909 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
55910 + break;
55911 + case GR_FILENAME_TWO_INT:
55912 + dentry = va_arg(ap, struct dentry *);
55913 + mnt = va_arg(ap, struct vfsmount *);
55914 + num1 = va_arg(ap, int);
55915 + num2 = va_arg(ap, int);
55916 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
55917 + break;
55918 + case GR_FILENAME_TWO_INT_STR:
55919 + dentry = va_arg(ap, struct dentry *);
55920 + mnt = va_arg(ap, struct vfsmount *);
55921 + num1 = va_arg(ap, int);
55922 + num2 = va_arg(ap, int);
55923 + str1 = va_arg(ap, char *);
55924 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
55925 + break;
55926 + case GR_TEXTREL:
55927 + file = va_arg(ap, struct file *);
55928 + ulong1 = va_arg(ap, unsigned long);
55929 + ulong2 = va_arg(ap, unsigned long);
55930 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
55931 + break;
55932 + case GR_PTRACE:
55933 + task = va_arg(ap, struct task_struct *);
55934 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
55935 + break;
55936 + case GR_RESOURCE:
55937 + task = va_arg(ap, struct task_struct *);
55938 + cred = __task_cred(task);
55939 + pcred = __task_cred(task->real_parent);
55940 + ulong1 = va_arg(ap, unsigned long);
55941 + str1 = va_arg(ap, char *);
55942 + ulong2 = va_arg(ap, unsigned long);
55943 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55944 + break;
55945 + case GR_CAP:
55946 + task = va_arg(ap, struct task_struct *);
55947 + cred = __task_cred(task);
55948 + pcred = __task_cred(task->real_parent);
55949 + str1 = va_arg(ap, char *);
55950 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55951 + break;
55952 + case GR_SIG:
55953 + str1 = va_arg(ap, char *);
55954 + voidptr = va_arg(ap, void *);
55955 + gr_log_middle_varargs(audit, msg, str1, voidptr);
55956 + break;
55957 + case GR_SIG2:
55958 + task = va_arg(ap, struct task_struct *);
55959 + cred = __task_cred(task);
55960 + pcred = __task_cred(task->real_parent);
55961 + num1 = va_arg(ap, int);
55962 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55963 + break;
55964 + case GR_CRASH1:
55965 + task = va_arg(ap, struct task_struct *);
55966 + cred = __task_cred(task);
55967 + pcred = __task_cred(task->real_parent);
55968 + ulong1 = va_arg(ap, unsigned long);
55969 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
55970 + break;
55971 + case GR_CRASH2:
55972 + task = va_arg(ap, struct task_struct *);
55973 + cred = __task_cred(task);
55974 + pcred = __task_cred(task->real_parent);
55975 + ulong1 = va_arg(ap, unsigned long);
55976 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
55977 + break;
55978 + case GR_RWXMAP:
55979 + file = va_arg(ap, struct file *);
55980 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
55981 + break;
55982 + case GR_PSACCT:
55983 + {
55984 + unsigned int wday, cday;
55985 + __u8 whr, chr;
55986 + __u8 wmin, cmin;
55987 + __u8 wsec, csec;
55988 + char cur_tty[64] = { 0 };
55989 + char parent_tty[64] = { 0 };
55990 +
55991 + task = va_arg(ap, struct task_struct *);
55992 + wday = va_arg(ap, unsigned int);
55993 + cday = va_arg(ap, unsigned int);
55994 + whr = va_arg(ap, int);
55995 + chr = va_arg(ap, int);
55996 + wmin = va_arg(ap, int);
55997 + cmin = va_arg(ap, int);
55998 + wsec = va_arg(ap, int);
55999 + csec = va_arg(ap, int);
56000 + ulong1 = va_arg(ap, unsigned long);
56001 + cred = __task_cred(task);
56002 + pcred = __task_cred(task->real_parent);
56003 +
56004 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
56005 + }
56006 + break;
56007 + default:
56008 + gr_log_middle(audit, msg, ap);
56009 + }
56010 + va_end(ap);
56011 + // these don't need DEFAULTSECARGS printed on the end
56012 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
56013 + gr_log_end(audit, 0);
56014 + else
56015 + gr_log_end(audit, 1);
56016 + END_LOCKS(audit);
56017 +}
56018 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
56019 new file mode 100644
56020 index 0000000..f536303
56021 --- /dev/null
56022 +++ b/grsecurity/grsec_mem.c
56023 @@ -0,0 +1,40 @@
56024 +#include <linux/kernel.h>
56025 +#include <linux/sched.h>
56026 +#include <linux/mm.h>
56027 +#include <linux/mman.h>
56028 +#include <linux/grinternal.h>
56029 +
56030 +void
56031 +gr_handle_ioperm(void)
56032 +{
56033 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
56034 + return;
56035 +}
56036 +
56037 +void
56038 +gr_handle_iopl(void)
56039 +{
56040 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
56041 + return;
56042 +}
56043 +
56044 +void
56045 +gr_handle_mem_readwrite(u64 from, u64 to)
56046 +{
56047 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
56048 + return;
56049 +}
56050 +
56051 +void
56052 +gr_handle_vm86(void)
56053 +{
56054 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
56055 + return;
56056 +}
56057 +
56058 +void
56059 +gr_log_badprocpid(const char *entry)
56060 +{
56061 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
56062 + return;
56063 +}
56064 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
56065 new file mode 100644
56066 index 0000000..2131422
56067 --- /dev/null
56068 +++ b/grsecurity/grsec_mount.c
56069 @@ -0,0 +1,62 @@
56070 +#include <linux/kernel.h>
56071 +#include <linux/sched.h>
56072 +#include <linux/mount.h>
56073 +#include <linux/grsecurity.h>
56074 +#include <linux/grinternal.h>
56075 +
56076 +void
56077 +gr_log_remount(const char *devname, const int retval)
56078 +{
56079 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56080 + if (grsec_enable_mount && (retval >= 0))
56081 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
56082 +#endif
56083 + return;
56084 +}
56085 +
56086 +void
56087 +gr_log_unmount(const char *devname, const int retval)
56088 +{
56089 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56090 + if (grsec_enable_mount && (retval >= 0))
56091 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
56092 +#endif
56093 + return;
56094 +}
56095 +
56096 +void
56097 +gr_log_mount(const char *from, const char *to, const int retval)
56098 +{
56099 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56100 + if (grsec_enable_mount && (retval >= 0))
56101 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
56102 +#endif
56103 + return;
56104 +}
56105 +
56106 +int
56107 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
56108 +{
56109 +#ifdef CONFIG_GRKERNSEC_ROFS
56110 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
56111 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
56112 + return -EPERM;
56113 + } else
56114 + return 0;
56115 +#endif
56116 + return 0;
56117 +}
56118 +
56119 +int
56120 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
56121 +{
56122 +#ifdef CONFIG_GRKERNSEC_ROFS
56123 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
56124 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
56125 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
56126 + return -EPERM;
56127 + } else
56128 + return 0;
56129 +#endif
56130 + return 0;
56131 +}
56132 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
56133 new file mode 100644
56134 index 0000000..a3b12a0
56135 --- /dev/null
56136 +++ b/grsecurity/grsec_pax.c
56137 @@ -0,0 +1,36 @@
56138 +#include <linux/kernel.h>
56139 +#include <linux/sched.h>
56140 +#include <linux/mm.h>
56141 +#include <linux/file.h>
56142 +#include <linux/grinternal.h>
56143 +#include <linux/grsecurity.h>
56144 +
56145 +void
56146 +gr_log_textrel(struct vm_area_struct * vma)
56147 +{
56148 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56149 + if (grsec_enable_audit_textrel)
56150 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
56151 +#endif
56152 + return;
56153 +}
56154 +
56155 +void
56156 +gr_log_rwxmmap(struct file *file)
56157 +{
56158 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56159 + if (grsec_enable_log_rwxmaps)
56160 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
56161 +#endif
56162 + return;
56163 +}
56164 +
56165 +void
56166 +gr_log_rwxmprotect(struct file *file)
56167 +{
56168 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56169 + if (grsec_enable_log_rwxmaps)
56170 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
56171 +#endif
56172 + return;
56173 +}
56174 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
56175 new file mode 100644
56176 index 0000000..f7f29aa
56177 --- /dev/null
56178 +++ b/grsecurity/grsec_ptrace.c
56179 @@ -0,0 +1,30 @@
56180 +#include <linux/kernel.h>
56181 +#include <linux/sched.h>
56182 +#include <linux/grinternal.h>
56183 +#include <linux/security.h>
56184 +
56185 +void
56186 +gr_audit_ptrace(struct task_struct *task)
56187 +{
56188 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56189 + if (grsec_enable_audit_ptrace)
56190 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
56191 +#endif
56192 + return;
56193 +}
56194 +
56195 +int
56196 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
56197 +{
56198 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56199 + const struct dentry *dentry = file->f_path.dentry;
56200 + const struct vfsmount *mnt = file->f_path.mnt;
56201 +
56202 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
56203 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
56204 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
56205 + return -EACCES;
56206 + }
56207 +#endif
56208 + return 0;
56209 +}
56210 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
56211 new file mode 100644
56212 index 0000000..7a5b2de
56213 --- /dev/null
56214 +++ b/grsecurity/grsec_sig.c
56215 @@ -0,0 +1,207 @@
56216 +#include <linux/kernel.h>
56217 +#include <linux/sched.h>
56218 +#include <linux/delay.h>
56219 +#include <linux/grsecurity.h>
56220 +#include <linux/grinternal.h>
56221 +#include <linux/hardirq.h>
56222 +
56223 +char *signames[] = {
56224 + [SIGSEGV] = "Segmentation fault",
56225 + [SIGILL] = "Illegal instruction",
56226 + [SIGABRT] = "Abort",
56227 + [SIGBUS] = "Invalid alignment/Bus error"
56228 +};
56229 +
56230 +void
56231 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
56232 +{
56233 +#ifdef CONFIG_GRKERNSEC_SIGNAL
56234 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
56235 + (sig == SIGABRT) || (sig == SIGBUS))) {
56236 + if (t->pid == current->pid) {
56237 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
56238 + } else {
56239 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
56240 + }
56241 + }
56242 +#endif
56243 + return;
56244 +}
56245 +
56246 +int
56247 +gr_handle_signal(const struct task_struct *p, const int sig)
56248 +{
56249 +#ifdef CONFIG_GRKERNSEC
56250 + /* ignore the 0 signal for protected task checks */
56251 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
56252 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
56253 + return -EPERM;
56254 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
56255 + return -EPERM;
56256 + }
56257 +#endif
56258 + return 0;
56259 +}
56260 +
56261 +#ifdef CONFIG_GRKERNSEC
56262 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
56263 +
56264 +int gr_fake_force_sig(int sig, struct task_struct *t)
56265 +{
56266 + unsigned long int flags;
56267 + int ret, blocked, ignored;
56268 + struct k_sigaction *action;
56269 +
56270 + spin_lock_irqsave(&t->sighand->siglock, flags);
56271 + action = &t->sighand->action[sig-1];
56272 + ignored = action->sa.sa_handler == SIG_IGN;
56273 + blocked = sigismember(&t->blocked, sig);
56274 + if (blocked || ignored) {
56275 + action->sa.sa_handler = SIG_DFL;
56276 + if (blocked) {
56277 + sigdelset(&t->blocked, sig);
56278 + recalc_sigpending_and_wake(t);
56279 + }
56280 + }
56281 + if (action->sa.sa_handler == SIG_DFL)
56282 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
56283 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
56284 +
56285 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
56286 +
56287 + return ret;
56288 +}
56289 +#endif
56290 +
56291 +#ifdef CONFIG_GRKERNSEC_BRUTE
56292 +#define GR_USER_BAN_TIME (15 * 60)
56293 +
56294 +static int __get_dumpable(unsigned long mm_flags)
56295 +{
56296 + int ret;
56297 +
56298 + ret = mm_flags & MMF_DUMPABLE_MASK;
56299 + return (ret >= 2) ? 2 : ret;
56300 +}
56301 +#endif
56302 +
56303 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
56304 +{
56305 +#ifdef CONFIG_GRKERNSEC_BRUTE
56306 + uid_t uid = 0;
56307 +
56308 + if (!grsec_enable_brute)
56309 + return;
56310 +
56311 + rcu_read_lock();
56312 + read_lock(&tasklist_lock);
56313 + read_lock(&grsec_exec_file_lock);
56314 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
56315 + p->real_parent->brute = 1;
56316 + else {
56317 + const struct cred *cred = __task_cred(p), *cred2;
56318 + struct task_struct *tsk, *tsk2;
56319 +
56320 + if (!__get_dumpable(mm_flags) && cred->uid) {
56321 + struct user_struct *user;
56322 +
56323 + uid = cred->uid;
56324 +
56325 + /* this is put upon execution past expiration */
56326 + user = find_user(uid);
56327 + if (user == NULL)
56328 + goto unlock;
56329 + user->banned = 1;
56330 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
56331 + if (user->ban_expires == ~0UL)
56332 + user->ban_expires--;
56333 +
56334 + do_each_thread(tsk2, tsk) {
56335 + cred2 = __task_cred(tsk);
56336 + if (tsk != p && cred2->uid == uid)
56337 + gr_fake_force_sig(SIGKILL, tsk);
56338 + } while_each_thread(tsk2, tsk);
56339 + }
56340 + }
56341 +unlock:
56342 + read_unlock(&grsec_exec_file_lock);
56343 + read_unlock(&tasklist_lock);
56344 + rcu_read_unlock();
56345 +
56346 + if (uid)
56347 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
56348 +
56349 +#endif
56350 + return;
56351 +}
56352 +
56353 +void gr_handle_brute_check(void)
56354 +{
56355 +#ifdef CONFIG_GRKERNSEC_BRUTE
56356 + if (current->brute)
56357 + msleep(30 * 1000);
56358 +#endif
56359 + return;
56360 +}
56361 +
56362 +void gr_handle_kernel_exploit(void)
56363 +{
56364 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
56365 + const struct cred *cred;
56366 + struct task_struct *tsk, *tsk2;
56367 + struct user_struct *user;
56368 + uid_t uid;
56369 +
56370 + if (in_irq() || in_serving_softirq() || in_nmi())
56371 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
56372 +
56373 + uid = current_uid();
56374 +
56375 + if (uid == 0)
56376 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
56377 + else {
56378 + /* kill all the processes of this user, hold a reference
56379 + to their creds struct, and prevent them from creating
56380 + another process until system reset
56381 + */
56382 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
56383 + /* we intentionally leak this ref */
56384 + user = get_uid(current->cred->user);
56385 + if (user) {
56386 + user->banned = 1;
56387 + user->ban_expires = ~0UL;
56388 + }
56389 +
56390 + read_lock(&tasklist_lock);
56391 + do_each_thread(tsk2, tsk) {
56392 + cred = __task_cred(tsk);
56393 + if (cred->uid == uid)
56394 + gr_fake_force_sig(SIGKILL, tsk);
56395 + } while_each_thread(tsk2, tsk);
56396 + read_unlock(&tasklist_lock);
56397 + }
56398 +#endif
56399 +}
56400 +
56401 +int __gr_process_user_ban(struct user_struct *user)
56402 +{
56403 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56404 + if (unlikely(user->banned)) {
56405 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
56406 + user->banned = 0;
56407 + user->ban_expires = 0;
56408 + free_uid(user);
56409 + } else
56410 + return -EPERM;
56411 + }
56412 +#endif
56413 + return 0;
56414 +}
56415 +
56416 +int gr_process_user_ban(void)
56417 +{
56418 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56419 + return __gr_process_user_ban(current->cred->user);
56420 +#endif
56421 + return 0;
56422 +}
56423 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
56424 new file mode 100644
56425 index 0000000..4030d57
56426 --- /dev/null
56427 +++ b/grsecurity/grsec_sock.c
56428 @@ -0,0 +1,244 @@
56429 +#include <linux/kernel.h>
56430 +#include <linux/module.h>
56431 +#include <linux/sched.h>
56432 +#include <linux/file.h>
56433 +#include <linux/net.h>
56434 +#include <linux/in.h>
56435 +#include <linux/ip.h>
56436 +#include <net/sock.h>
56437 +#include <net/inet_sock.h>
56438 +#include <linux/grsecurity.h>
56439 +#include <linux/grinternal.h>
56440 +#include <linux/gracl.h>
56441 +
56442 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
56443 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
56444 +
56445 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
56446 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
56447 +
56448 +#ifdef CONFIG_UNIX_MODULE
56449 +EXPORT_SYMBOL(gr_acl_handle_unix);
56450 +EXPORT_SYMBOL(gr_acl_handle_mknod);
56451 +EXPORT_SYMBOL(gr_handle_chroot_unix);
56452 +EXPORT_SYMBOL(gr_handle_create);
56453 +#endif
56454 +
56455 +#ifdef CONFIG_GRKERNSEC
56456 +#define gr_conn_table_size 32749
56457 +struct conn_table_entry {
56458 + struct conn_table_entry *next;
56459 + struct signal_struct *sig;
56460 +};
56461 +
56462 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
56463 +DEFINE_SPINLOCK(gr_conn_table_lock);
56464 +
56465 +extern const char * gr_socktype_to_name(unsigned char type);
56466 +extern const char * gr_proto_to_name(unsigned char proto);
56467 +extern const char * gr_sockfamily_to_name(unsigned char family);
56468 +
56469 +static __inline__ int
56470 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
56471 +{
56472 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
56473 +}
56474 +
56475 +static __inline__ int
56476 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
56477 + __u16 sport, __u16 dport)
56478 +{
56479 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
56480 + sig->gr_sport == sport && sig->gr_dport == dport))
56481 + return 1;
56482 + else
56483 + return 0;
56484 +}
56485 +
56486 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
56487 +{
56488 + struct conn_table_entry **match;
56489 + unsigned int index;
56490 +
56491 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56492 + sig->gr_sport, sig->gr_dport,
56493 + gr_conn_table_size);
56494 +
56495 + newent->sig = sig;
56496 +
56497 + match = &gr_conn_table[index];
56498 + newent->next = *match;
56499 + *match = newent;
56500 +
56501 + return;
56502 +}
56503 +
56504 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
56505 +{
56506 + struct conn_table_entry *match, *last = NULL;
56507 + unsigned int index;
56508 +
56509 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56510 + sig->gr_sport, sig->gr_dport,
56511 + gr_conn_table_size);
56512 +
56513 + match = gr_conn_table[index];
56514 + while (match && !conn_match(match->sig,
56515 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
56516 + sig->gr_dport)) {
56517 + last = match;
56518 + match = match->next;
56519 + }
56520 +
56521 + if (match) {
56522 + if (last)
56523 + last->next = match->next;
56524 + else
56525 + gr_conn_table[index] = NULL;
56526 + kfree(match);
56527 + }
56528 +
56529 + return;
56530 +}
56531 +
56532 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
56533 + __u16 sport, __u16 dport)
56534 +{
56535 + struct conn_table_entry *match;
56536 + unsigned int index;
56537 +
56538 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
56539 +
56540 + match = gr_conn_table[index];
56541 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
56542 + match = match->next;
56543 +
56544 + if (match)
56545 + return match->sig;
56546 + else
56547 + return NULL;
56548 +}
56549 +
56550 +#endif
56551 +
56552 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
56553 +{
56554 +#ifdef CONFIG_GRKERNSEC
56555 + struct signal_struct *sig = task->signal;
56556 + struct conn_table_entry *newent;
56557 +
56558 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
56559 + if (newent == NULL)
56560 + return;
56561 + /* no bh lock needed since we are called with bh disabled */
56562 + spin_lock(&gr_conn_table_lock);
56563 + gr_del_task_from_ip_table_nolock(sig);
56564 + sig->gr_saddr = inet->inet_rcv_saddr;
56565 + sig->gr_daddr = inet->inet_daddr;
56566 + sig->gr_sport = inet->inet_sport;
56567 + sig->gr_dport = inet->inet_dport;
56568 + gr_add_to_task_ip_table_nolock(sig, newent);
56569 + spin_unlock(&gr_conn_table_lock);
56570 +#endif
56571 + return;
56572 +}
56573 +
56574 +void gr_del_task_from_ip_table(struct task_struct *task)
56575 +{
56576 +#ifdef CONFIG_GRKERNSEC
56577 + spin_lock_bh(&gr_conn_table_lock);
56578 + gr_del_task_from_ip_table_nolock(task->signal);
56579 + spin_unlock_bh(&gr_conn_table_lock);
56580 +#endif
56581 + return;
56582 +}
56583 +
56584 +void
56585 +gr_attach_curr_ip(const struct sock *sk)
56586 +{
56587 +#ifdef CONFIG_GRKERNSEC
56588 + struct signal_struct *p, *set;
56589 + const struct inet_sock *inet = inet_sk(sk);
56590 +
56591 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
56592 + return;
56593 +
56594 + set = current->signal;
56595 +
56596 + spin_lock_bh(&gr_conn_table_lock);
56597 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
56598 + inet->inet_dport, inet->inet_sport);
56599 + if (unlikely(p != NULL)) {
56600 + set->curr_ip = p->curr_ip;
56601 + set->used_accept = 1;
56602 + gr_del_task_from_ip_table_nolock(p);
56603 + spin_unlock_bh(&gr_conn_table_lock);
56604 + return;
56605 + }
56606 + spin_unlock_bh(&gr_conn_table_lock);
56607 +
56608 + set->curr_ip = inet->inet_daddr;
56609 + set->used_accept = 1;
56610 +#endif
56611 + return;
56612 +}
56613 +
56614 +int
56615 +gr_handle_sock_all(const int family, const int type, const int protocol)
56616 +{
56617 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56618 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
56619 + (family != AF_UNIX)) {
56620 + if (family == AF_INET)
56621 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
56622 + else
56623 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
56624 + return -EACCES;
56625 + }
56626 +#endif
56627 + return 0;
56628 +}
56629 +
56630 +int
56631 +gr_handle_sock_server(const struct sockaddr *sck)
56632 +{
56633 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56634 + if (grsec_enable_socket_server &&
56635 + in_group_p(grsec_socket_server_gid) &&
56636 + sck && (sck->sa_family != AF_UNIX) &&
56637 + (sck->sa_family != AF_LOCAL)) {
56638 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56639 + return -EACCES;
56640 + }
56641 +#endif
56642 + return 0;
56643 +}
56644 +
56645 +int
56646 +gr_handle_sock_server_other(const struct sock *sck)
56647 +{
56648 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56649 + if (grsec_enable_socket_server &&
56650 + in_group_p(grsec_socket_server_gid) &&
56651 + sck && (sck->sk_family != AF_UNIX) &&
56652 + (sck->sk_family != AF_LOCAL)) {
56653 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56654 + return -EACCES;
56655 + }
56656 +#endif
56657 + return 0;
56658 +}
56659 +
56660 +int
56661 +gr_handle_sock_client(const struct sockaddr *sck)
56662 +{
56663 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56664 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
56665 + sck && (sck->sa_family != AF_UNIX) &&
56666 + (sck->sa_family != AF_LOCAL)) {
56667 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
56668 + return -EACCES;
56669 + }
56670 +#endif
56671 + return 0;
56672 +}
56673 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
56674 new file mode 100644
56675 index 0000000..a1aedd7
56676 --- /dev/null
56677 +++ b/grsecurity/grsec_sysctl.c
56678 @@ -0,0 +1,451 @@
56679 +#include <linux/kernel.h>
56680 +#include <linux/sched.h>
56681 +#include <linux/sysctl.h>
56682 +#include <linux/grsecurity.h>
56683 +#include <linux/grinternal.h>
56684 +
56685 +int
56686 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
56687 +{
56688 +#ifdef CONFIG_GRKERNSEC_SYSCTL
56689 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
56690 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
56691 + return -EACCES;
56692 + }
56693 +#endif
56694 + return 0;
56695 +}
56696 +
56697 +#ifdef CONFIG_GRKERNSEC_ROFS
56698 +static int __maybe_unused one = 1;
56699 +#endif
56700 +
56701 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
56702 +struct ctl_table grsecurity_table[] = {
56703 +#ifdef CONFIG_GRKERNSEC_SYSCTL
56704 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
56705 +#ifdef CONFIG_GRKERNSEC_IO
56706 + {
56707 + .procname = "disable_priv_io",
56708 + .data = &grsec_disable_privio,
56709 + .maxlen = sizeof(int),
56710 + .mode = 0600,
56711 + .proc_handler = &proc_dointvec,
56712 + },
56713 +#endif
56714 +#endif
56715 +#ifdef CONFIG_GRKERNSEC_LINK
56716 + {
56717 + .procname = "linking_restrictions",
56718 + .data = &grsec_enable_link,
56719 + .maxlen = sizeof(int),
56720 + .mode = 0600,
56721 + .proc_handler = &proc_dointvec,
56722 + },
56723 +#endif
56724 +#ifdef CONFIG_GRKERNSEC_BRUTE
56725 + {
56726 + .procname = "deter_bruteforce",
56727 + .data = &grsec_enable_brute,
56728 + .maxlen = sizeof(int),
56729 + .mode = 0600,
56730 + .proc_handler = &proc_dointvec,
56731 + },
56732 +#endif
56733 +#ifdef CONFIG_GRKERNSEC_FIFO
56734 + {
56735 + .procname = "fifo_restrictions",
56736 + .data = &grsec_enable_fifo,
56737 + .maxlen = sizeof(int),
56738 + .mode = 0600,
56739 + .proc_handler = &proc_dointvec,
56740 + },
56741 +#endif
56742 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56743 + {
56744 + .procname = "ptrace_readexec",
56745 + .data = &grsec_enable_ptrace_readexec,
56746 + .maxlen = sizeof(int),
56747 + .mode = 0600,
56748 + .proc_handler = &proc_dointvec,
56749 + },
56750 +#endif
56751 +#ifdef CONFIG_GRKERNSEC_SETXID
56752 + {
56753 + .procname = "consistent_setxid",
56754 + .data = &grsec_enable_setxid,
56755 + .maxlen = sizeof(int),
56756 + .mode = 0600,
56757 + .proc_handler = &proc_dointvec,
56758 + },
56759 +#endif
56760 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56761 + {
56762 + .procname = "ip_blackhole",
56763 + .data = &grsec_enable_blackhole,
56764 + .maxlen = sizeof(int),
56765 + .mode = 0600,
56766 + .proc_handler = &proc_dointvec,
56767 + },
56768 + {
56769 + .procname = "lastack_retries",
56770 + .data = &grsec_lastack_retries,
56771 + .maxlen = sizeof(int),
56772 + .mode = 0600,
56773 + .proc_handler = &proc_dointvec,
56774 + },
56775 +#endif
56776 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56777 + {
56778 + .procname = "exec_logging",
56779 + .data = &grsec_enable_execlog,
56780 + .maxlen = sizeof(int),
56781 + .mode = 0600,
56782 + .proc_handler = &proc_dointvec,
56783 + },
56784 +#endif
56785 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56786 + {
56787 + .procname = "rwxmap_logging",
56788 + .data = &grsec_enable_log_rwxmaps,
56789 + .maxlen = sizeof(int),
56790 + .mode = 0600,
56791 + .proc_handler = &proc_dointvec,
56792 + },
56793 +#endif
56794 +#ifdef CONFIG_GRKERNSEC_SIGNAL
56795 + {
56796 + .procname = "signal_logging",
56797 + .data = &grsec_enable_signal,
56798 + .maxlen = sizeof(int),
56799 + .mode = 0600,
56800 + .proc_handler = &proc_dointvec,
56801 + },
56802 +#endif
56803 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56804 + {
56805 + .procname = "forkfail_logging",
56806 + .data = &grsec_enable_forkfail,
56807 + .maxlen = sizeof(int),
56808 + .mode = 0600,
56809 + .proc_handler = &proc_dointvec,
56810 + },
56811 +#endif
56812 +#ifdef CONFIG_GRKERNSEC_TIME
56813 + {
56814 + .procname = "timechange_logging",
56815 + .data = &grsec_enable_time,
56816 + .maxlen = sizeof(int),
56817 + .mode = 0600,
56818 + .proc_handler = &proc_dointvec,
56819 + },
56820 +#endif
56821 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56822 + {
56823 + .procname = "chroot_deny_shmat",
56824 + .data = &grsec_enable_chroot_shmat,
56825 + .maxlen = sizeof(int),
56826 + .mode = 0600,
56827 + .proc_handler = &proc_dointvec,
56828 + },
56829 +#endif
56830 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56831 + {
56832 + .procname = "chroot_deny_unix",
56833 + .data = &grsec_enable_chroot_unix,
56834 + .maxlen = sizeof(int),
56835 + .mode = 0600,
56836 + .proc_handler = &proc_dointvec,
56837 + },
56838 +#endif
56839 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56840 + {
56841 + .procname = "chroot_deny_mount",
56842 + .data = &grsec_enable_chroot_mount,
56843 + .maxlen = sizeof(int),
56844 + .mode = 0600,
56845 + .proc_handler = &proc_dointvec,
56846 + },
56847 +#endif
56848 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56849 + {
56850 + .procname = "chroot_deny_fchdir",
56851 + .data = &grsec_enable_chroot_fchdir,
56852 + .maxlen = sizeof(int),
56853 + .mode = 0600,
56854 + .proc_handler = &proc_dointvec,
56855 + },
56856 +#endif
56857 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56858 + {
56859 + .procname = "chroot_deny_chroot",
56860 + .data = &grsec_enable_chroot_double,
56861 + .maxlen = sizeof(int),
56862 + .mode = 0600,
56863 + .proc_handler = &proc_dointvec,
56864 + },
56865 +#endif
56866 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56867 + {
56868 + .procname = "chroot_deny_pivot",
56869 + .data = &grsec_enable_chroot_pivot,
56870 + .maxlen = sizeof(int),
56871 + .mode = 0600,
56872 + .proc_handler = &proc_dointvec,
56873 + },
56874 +#endif
56875 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56876 + {
56877 + .procname = "chroot_enforce_chdir",
56878 + .data = &grsec_enable_chroot_chdir,
56879 + .maxlen = sizeof(int),
56880 + .mode = 0600,
56881 + .proc_handler = &proc_dointvec,
56882 + },
56883 +#endif
56884 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56885 + {
56886 + .procname = "chroot_deny_chmod",
56887 + .data = &grsec_enable_chroot_chmod,
56888 + .maxlen = sizeof(int),
56889 + .mode = 0600,
56890 + .proc_handler = &proc_dointvec,
56891 + },
56892 +#endif
56893 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56894 + {
56895 + .procname = "chroot_deny_mknod",
56896 + .data = &grsec_enable_chroot_mknod,
56897 + .maxlen = sizeof(int),
56898 + .mode = 0600,
56899 + .proc_handler = &proc_dointvec,
56900 + },
56901 +#endif
56902 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56903 + {
56904 + .procname = "chroot_restrict_nice",
56905 + .data = &grsec_enable_chroot_nice,
56906 + .maxlen = sizeof(int),
56907 + .mode = 0600,
56908 + .proc_handler = &proc_dointvec,
56909 + },
56910 +#endif
56911 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56912 + {
56913 + .procname = "chroot_execlog",
56914 + .data = &grsec_enable_chroot_execlog,
56915 + .maxlen = sizeof(int),
56916 + .mode = 0600,
56917 + .proc_handler = &proc_dointvec,
56918 + },
56919 +#endif
56920 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56921 + {
56922 + .procname = "chroot_caps",
56923 + .data = &grsec_enable_chroot_caps,
56924 + .maxlen = sizeof(int),
56925 + .mode = 0600,
56926 + .proc_handler = &proc_dointvec,
56927 + },
56928 +#endif
56929 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56930 + {
56931 + .procname = "chroot_deny_sysctl",
56932 + .data = &grsec_enable_chroot_sysctl,
56933 + .maxlen = sizeof(int),
56934 + .mode = 0600,
56935 + .proc_handler = &proc_dointvec,
56936 + },
56937 +#endif
56938 +#ifdef CONFIG_GRKERNSEC_TPE
56939 + {
56940 + .procname = "tpe",
56941 + .data = &grsec_enable_tpe,
56942 + .maxlen = sizeof(int),
56943 + .mode = 0600,
56944 + .proc_handler = &proc_dointvec,
56945 + },
56946 + {
56947 + .procname = "tpe_gid",
56948 + .data = &grsec_tpe_gid,
56949 + .maxlen = sizeof(int),
56950 + .mode = 0600,
56951 + .proc_handler = &proc_dointvec,
56952 + },
56953 +#endif
56954 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56955 + {
56956 + .procname = "tpe_invert",
56957 + .data = &grsec_enable_tpe_invert,
56958 + .maxlen = sizeof(int),
56959 + .mode = 0600,
56960 + .proc_handler = &proc_dointvec,
56961 + },
56962 +#endif
56963 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
56964 + {
56965 + .procname = "tpe_restrict_all",
56966 + .data = &grsec_enable_tpe_all,
56967 + .maxlen = sizeof(int),
56968 + .mode = 0600,
56969 + .proc_handler = &proc_dointvec,
56970 + },
56971 +#endif
56972 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56973 + {
56974 + .procname = "socket_all",
56975 + .data = &grsec_enable_socket_all,
56976 + .maxlen = sizeof(int),
56977 + .mode = 0600,
56978 + .proc_handler = &proc_dointvec,
56979 + },
56980 + {
56981 + .procname = "socket_all_gid",
56982 + .data = &grsec_socket_all_gid,
56983 + .maxlen = sizeof(int),
56984 + .mode = 0600,
56985 + .proc_handler = &proc_dointvec,
56986 + },
56987 +#endif
56988 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56989 + {
56990 + .procname = "socket_client",
56991 + .data = &grsec_enable_socket_client,
56992 + .maxlen = sizeof(int),
56993 + .mode = 0600,
56994 + .proc_handler = &proc_dointvec,
56995 + },
56996 + {
56997 + .procname = "socket_client_gid",
56998 + .data = &grsec_socket_client_gid,
56999 + .maxlen = sizeof(int),
57000 + .mode = 0600,
57001 + .proc_handler = &proc_dointvec,
57002 + },
57003 +#endif
57004 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57005 + {
57006 + .procname = "socket_server",
57007 + .data = &grsec_enable_socket_server,
57008 + .maxlen = sizeof(int),
57009 + .mode = 0600,
57010 + .proc_handler = &proc_dointvec,
57011 + },
57012 + {
57013 + .procname = "socket_server_gid",
57014 + .data = &grsec_socket_server_gid,
57015 + .maxlen = sizeof(int),
57016 + .mode = 0600,
57017 + .proc_handler = &proc_dointvec,
57018 + },
57019 +#endif
57020 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57021 + {
57022 + .procname = "audit_group",
57023 + .data = &grsec_enable_group,
57024 + .maxlen = sizeof(int),
57025 + .mode = 0600,
57026 + .proc_handler = &proc_dointvec,
57027 + },
57028 + {
57029 + .procname = "audit_gid",
57030 + .data = &grsec_audit_gid,
57031 + .maxlen = sizeof(int),
57032 + .mode = 0600,
57033 + .proc_handler = &proc_dointvec,
57034 + },
57035 +#endif
57036 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57037 + {
57038 + .procname = "audit_chdir",
57039 + .data = &grsec_enable_chdir,
57040 + .maxlen = sizeof(int),
57041 + .mode = 0600,
57042 + .proc_handler = &proc_dointvec,
57043 + },
57044 +#endif
57045 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57046 + {
57047 + .procname = "audit_mount",
57048 + .data = &grsec_enable_mount,
57049 + .maxlen = sizeof(int),
57050 + .mode = 0600,
57051 + .proc_handler = &proc_dointvec,
57052 + },
57053 +#endif
57054 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57055 + {
57056 + .procname = "audit_textrel",
57057 + .data = &grsec_enable_audit_textrel,
57058 + .maxlen = sizeof(int),
57059 + .mode = 0600,
57060 + .proc_handler = &proc_dointvec,
57061 + },
57062 +#endif
57063 +#ifdef CONFIG_GRKERNSEC_DMESG
57064 + {
57065 + .procname = "dmesg",
57066 + .data = &grsec_enable_dmesg,
57067 + .maxlen = sizeof(int),
57068 + .mode = 0600,
57069 + .proc_handler = &proc_dointvec,
57070 + },
57071 +#endif
57072 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57073 + {
57074 + .procname = "chroot_findtask",
57075 + .data = &grsec_enable_chroot_findtask,
57076 + .maxlen = sizeof(int),
57077 + .mode = 0600,
57078 + .proc_handler = &proc_dointvec,
57079 + },
57080 +#endif
57081 +#ifdef CONFIG_GRKERNSEC_RESLOG
57082 + {
57083 + .procname = "resource_logging",
57084 + .data = &grsec_resource_logging,
57085 + .maxlen = sizeof(int),
57086 + .mode = 0600,
57087 + .proc_handler = &proc_dointvec,
57088 + },
57089 +#endif
57090 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57091 + {
57092 + .procname = "audit_ptrace",
57093 + .data = &grsec_enable_audit_ptrace,
57094 + .maxlen = sizeof(int),
57095 + .mode = 0600,
57096 + .proc_handler = &proc_dointvec,
57097 + },
57098 +#endif
57099 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57100 + {
57101 + .procname = "harden_ptrace",
57102 + .data = &grsec_enable_harden_ptrace,
57103 + .maxlen = sizeof(int),
57104 + .mode = 0600,
57105 + .proc_handler = &proc_dointvec,
57106 + },
57107 +#endif
57108 + {
57109 + .procname = "grsec_lock",
57110 + .data = &grsec_lock,
57111 + .maxlen = sizeof(int),
57112 + .mode = 0600,
57113 + .proc_handler = &proc_dointvec,
57114 + },
57115 +#endif
57116 +#ifdef CONFIG_GRKERNSEC_ROFS
57117 + {
57118 + .procname = "romount_protect",
57119 + .data = &grsec_enable_rofs,
57120 + .maxlen = sizeof(int),
57121 + .mode = 0600,
57122 + .proc_handler = &proc_dointvec_minmax,
57123 + .extra1 = &one,
57124 + .extra2 = &one,
57125 + },
57126 +#endif
57127 + { }
57128 +};
57129 +#endif
57130 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
57131 new file mode 100644
57132 index 0000000..0dc13c3
57133 --- /dev/null
57134 +++ b/grsecurity/grsec_time.c
57135 @@ -0,0 +1,16 @@
57136 +#include <linux/kernel.h>
57137 +#include <linux/sched.h>
57138 +#include <linux/grinternal.h>
57139 +#include <linux/module.h>
57140 +
57141 +void
57142 +gr_log_timechange(void)
57143 +{
57144 +#ifdef CONFIG_GRKERNSEC_TIME
57145 + if (grsec_enable_time)
57146 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
57147 +#endif
57148 + return;
57149 +}
57150 +
57151 +EXPORT_SYMBOL(gr_log_timechange);
57152 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
57153 new file mode 100644
57154 index 0000000..07e0dc0
57155 --- /dev/null
57156 +++ b/grsecurity/grsec_tpe.c
57157 @@ -0,0 +1,73 @@
57158 +#include <linux/kernel.h>
57159 +#include <linux/sched.h>
57160 +#include <linux/file.h>
57161 +#include <linux/fs.h>
57162 +#include <linux/grinternal.h>
57163 +
57164 +extern int gr_acl_tpe_check(void);
57165 +
57166 +int
57167 +gr_tpe_allow(const struct file *file)
57168 +{
57169 +#ifdef CONFIG_GRKERNSEC
57170 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
57171 + const struct cred *cred = current_cred();
57172 + char *msg = NULL;
57173 + char *msg2 = NULL;
57174 +
57175 + // never restrict root
57176 + if (!cred->uid)
57177 + return 1;
57178 +
57179 + if (grsec_enable_tpe) {
57180 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57181 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
57182 + msg = "not being in trusted group";
57183 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
57184 + msg = "being in untrusted group";
57185 +#else
57186 + if (in_group_p(grsec_tpe_gid))
57187 + msg = "being in untrusted group";
57188 +#endif
57189 + }
57190 + if (!msg && gr_acl_tpe_check())
57191 + msg = "being in untrusted role";
57192 +
57193 + // not in any affected group/role
57194 + if (!msg)
57195 + goto next_check;
57196 +
57197 + if (inode->i_uid)
57198 + msg2 = "file in non-root-owned directory";
57199 + else if (inode->i_mode & S_IWOTH)
57200 + msg2 = "file in world-writable directory";
57201 + else if (inode->i_mode & S_IWGRP)
57202 + msg2 = "file in group-writable directory";
57203 +
57204 + if (msg && msg2) {
57205 + char fullmsg[70] = {0};
57206 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
57207 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
57208 + return 0;
57209 + }
57210 + msg = NULL;
57211 +next_check:
57212 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
57213 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
57214 + return 1;
57215 +
57216 + if (inode->i_uid && (inode->i_uid != cred->uid))
57217 + msg = "directory not owned by user";
57218 + else if (inode->i_mode & S_IWOTH)
57219 + msg = "file in world-writable directory";
57220 + else if (inode->i_mode & S_IWGRP)
57221 + msg = "file in group-writable directory";
57222 +
57223 + if (msg) {
57224 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
57225 + return 0;
57226 + }
57227 +#endif
57228 +#endif
57229 + return 1;
57230 +}
57231 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
57232 new file mode 100644
57233 index 0000000..9f7b1ac
57234 --- /dev/null
57235 +++ b/grsecurity/grsum.c
57236 @@ -0,0 +1,61 @@
57237 +#include <linux/err.h>
57238 +#include <linux/kernel.h>
57239 +#include <linux/sched.h>
57240 +#include <linux/mm.h>
57241 +#include <linux/scatterlist.h>
57242 +#include <linux/crypto.h>
57243 +#include <linux/gracl.h>
57244 +
57245 +
57246 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
57247 +#error "crypto and sha256 must be built into the kernel"
57248 +#endif
57249 +
57250 +int
57251 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
57252 +{
57253 + char *p;
57254 + struct crypto_hash *tfm;
57255 + struct hash_desc desc;
57256 + struct scatterlist sg;
57257 + unsigned char temp_sum[GR_SHA_LEN];
57258 + volatile int retval = 0;
57259 + volatile int dummy = 0;
57260 + unsigned int i;
57261 +
57262 + sg_init_table(&sg, 1);
57263 +
57264 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
57265 + if (IS_ERR(tfm)) {
57266 + /* should never happen, since sha256 should be built in */
57267 + return 1;
57268 + }
57269 +
57270 + desc.tfm = tfm;
57271 + desc.flags = 0;
57272 +
57273 + crypto_hash_init(&desc);
57274 +
57275 + p = salt;
57276 + sg_set_buf(&sg, p, GR_SALT_LEN);
57277 + crypto_hash_update(&desc, &sg, sg.length);
57278 +
57279 + p = entry->pw;
57280 + sg_set_buf(&sg, p, strlen(p));
57281 +
57282 + crypto_hash_update(&desc, &sg, sg.length);
57283 +
57284 + crypto_hash_final(&desc, temp_sum);
57285 +
57286 + memset(entry->pw, 0, GR_PW_LEN);
57287 +
57288 + for (i = 0; i < GR_SHA_LEN; i++)
57289 + if (sum[i] != temp_sum[i])
57290 + retval = 1;
57291 + else
57292 + dummy = 1; // waste a cycle
57293 +
57294 + crypto_free_hash(tfm);
57295 +
57296 + return retval;
57297 +}
57298 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
57299 index 6cd5b64..f620d2d 100644
57300 --- a/include/acpi/acpi_bus.h
57301 +++ b/include/acpi/acpi_bus.h
57302 @@ -107,7 +107,7 @@ struct acpi_device_ops {
57303 acpi_op_bind bind;
57304 acpi_op_unbind unbind;
57305 acpi_op_notify notify;
57306 -};
57307 +} __no_const;
57308
57309 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57310
57311 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
57312 index b7babf0..71e4e74 100644
57313 --- a/include/asm-generic/atomic-long.h
57314 +++ b/include/asm-generic/atomic-long.h
57315 @@ -22,6 +22,12 @@
57316
57317 typedef atomic64_t atomic_long_t;
57318
57319 +#ifdef CONFIG_PAX_REFCOUNT
57320 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
57321 +#else
57322 +typedef atomic64_t atomic_long_unchecked_t;
57323 +#endif
57324 +
57325 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57326
57327 static inline long atomic_long_read(atomic_long_t *l)
57328 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57329 return (long)atomic64_read(v);
57330 }
57331
57332 +#ifdef CONFIG_PAX_REFCOUNT
57333 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57334 +{
57335 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57336 +
57337 + return (long)atomic64_read_unchecked(v);
57338 +}
57339 +#endif
57340 +
57341 static inline void atomic_long_set(atomic_long_t *l, long i)
57342 {
57343 atomic64_t *v = (atomic64_t *)l;
57344 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57345 atomic64_set(v, i);
57346 }
57347
57348 +#ifdef CONFIG_PAX_REFCOUNT
57349 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57350 +{
57351 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57352 +
57353 + atomic64_set_unchecked(v, i);
57354 +}
57355 +#endif
57356 +
57357 static inline void atomic_long_inc(atomic_long_t *l)
57358 {
57359 atomic64_t *v = (atomic64_t *)l;
57360 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57361 atomic64_inc(v);
57362 }
57363
57364 +#ifdef CONFIG_PAX_REFCOUNT
57365 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57366 +{
57367 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57368 +
57369 + atomic64_inc_unchecked(v);
57370 +}
57371 +#endif
57372 +
57373 static inline void atomic_long_dec(atomic_long_t *l)
57374 {
57375 atomic64_t *v = (atomic64_t *)l;
57376 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57377 atomic64_dec(v);
57378 }
57379
57380 +#ifdef CONFIG_PAX_REFCOUNT
57381 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57382 +{
57383 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57384 +
57385 + atomic64_dec_unchecked(v);
57386 +}
57387 +#endif
57388 +
57389 static inline void atomic_long_add(long i, atomic_long_t *l)
57390 {
57391 atomic64_t *v = (atomic64_t *)l;
57392 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57393 atomic64_add(i, v);
57394 }
57395
57396 +#ifdef CONFIG_PAX_REFCOUNT
57397 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57398 +{
57399 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57400 +
57401 + atomic64_add_unchecked(i, v);
57402 +}
57403 +#endif
57404 +
57405 static inline void atomic_long_sub(long i, atomic_long_t *l)
57406 {
57407 atomic64_t *v = (atomic64_t *)l;
57408 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57409 atomic64_sub(i, v);
57410 }
57411
57412 +#ifdef CONFIG_PAX_REFCOUNT
57413 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57414 +{
57415 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57416 +
57417 + atomic64_sub_unchecked(i, v);
57418 +}
57419 +#endif
57420 +
57421 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57422 {
57423 atomic64_t *v = (atomic64_t *)l;
57424 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57425 return (long)atomic64_inc_return(v);
57426 }
57427
57428 +#ifdef CONFIG_PAX_REFCOUNT
57429 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57430 +{
57431 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57432 +
57433 + return (long)atomic64_inc_return_unchecked(v);
57434 +}
57435 +#endif
57436 +
57437 static inline long atomic_long_dec_return(atomic_long_t *l)
57438 {
57439 atomic64_t *v = (atomic64_t *)l;
57440 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57441
57442 typedef atomic_t atomic_long_t;
57443
57444 +#ifdef CONFIG_PAX_REFCOUNT
57445 +typedef atomic_unchecked_t atomic_long_unchecked_t;
57446 +#else
57447 +typedef atomic_t atomic_long_unchecked_t;
57448 +#endif
57449 +
57450 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57451 static inline long atomic_long_read(atomic_long_t *l)
57452 {
57453 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57454 return (long)atomic_read(v);
57455 }
57456
57457 +#ifdef CONFIG_PAX_REFCOUNT
57458 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57459 +{
57460 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57461 +
57462 + return (long)atomic_read_unchecked(v);
57463 +}
57464 +#endif
57465 +
57466 static inline void atomic_long_set(atomic_long_t *l, long i)
57467 {
57468 atomic_t *v = (atomic_t *)l;
57469 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57470 atomic_set(v, i);
57471 }
57472
57473 +#ifdef CONFIG_PAX_REFCOUNT
57474 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57475 +{
57476 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57477 +
57478 + atomic_set_unchecked(v, i);
57479 +}
57480 +#endif
57481 +
57482 static inline void atomic_long_inc(atomic_long_t *l)
57483 {
57484 atomic_t *v = (atomic_t *)l;
57485 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57486 atomic_inc(v);
57487 }
57488
57489 +#ifdef CONFIG_PAX_REFCOUNT
57490 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57491 +{
57492 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57493 +
57494 + atomic_inc_unchecked(v);
57495 +}
57496 +#endif
57497 +
57498 static inline void atomic_long_dec(atomic_long_t *l)
57499 {
57500 atomic_t *v = (atomic_t *)l;
57501 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57502 atomic_dec(v);
57503 }
57504
57505 +#ifdef CONFIG_PAX_REFCOUNT
57506 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57507 +{
57508 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57509 +
57510 + atomic_dec_unchecked(v);
57511 +}
57512 +#endif
57513 +
57514 static inline void atomic_long_add(long i, atomic_long_t *l)
57515 {
57516 atomic_t *v = (atomic_t *)l;
57517 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57518 atomic_add(i, v);
57519 }
57520
57521 +#ifdef CONFIG_PAX_REFCOUNT
57522 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57523 +{
57524 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57525 +
57526 + atomic_add_unchecked(i, v);
57527 +}
57528 +#endif
57529 +
57530 static inline void atomic_long_sub(long i, atomic_long_t *l)
57531 {
57532 atomic_t *v = (atomic_t *)l;
57533 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57534 atomic_sub(i, v);
57535 }
57536
57537 +#ifdef CONFIG_PAX_REFCOUNT
57538 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57539 +{
57540 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57541 +
57542 + atomic_sub_unchecked(i, v);
57543 +}
57544 +#endif
57545 +
57546 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57547 {
57548 atomic_t *v = (atomic_t *)l;
57549 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57550 return (long)atomic_inc_return(v);
57551 }
57552
57553 +#ifdef CONFIG_PAX_REFCOUNT
57554 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57555 +{
57556 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57557 +
57558 + return (long)atomic_inc_return_unchecked(v);
57559 +}
57560 +#endif
57561 +
57562 static inline long atomic_long_dec_return(atomic_long_t *l)
57563 {
57564 atomic_t *v = (atomic_t *)l;
57565 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57566
57567 #endif /* BITS_PER_LONG == 64 */
57568
57569 +#ifdef CONFIG_PAX_REFCOUNT
57570 +static inline void pax_refcount_needs_these_functions(void)
57571 +{
57572 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
57573 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57574 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57575 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57576 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57577 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57578 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57579 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57580 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57581 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57582 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57583 +
57584 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57585 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57586 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57587 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57588 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57589 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57590 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57591 +}
57592 +#else
57593 +#define atomic_read_unchecked(v) atomic_read(v)
57594 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57595 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57596 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57597 +#define atomic_inc_unchecked(v) atomic_inc(v)
57598 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57599 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57600 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57601 +#define atomic_dec_unchecked(v) atomic_dec(v)
57602 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57603 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57604 +
57605 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
57606 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57607 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57608 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57609 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57610 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57611 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57612 +#endif
57613 +
57614 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
57615 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
57616 index b18ce4f..2ee2843 100644
57617 --- a/include/asm-generic/atomic64.h
57618 +++ b/include/asm-generic/atomic64.h
57619 @@ -16,6 +16,8 @@ typedef struct {
57620 long long counter;
57621 } atomic64_t;
57622
57623 +typedef atomic64_t atomic64_unchecked_t;
57624 +
57625 #define ATOMIC64_INIT(i) { (i) }
57626
57627 extern long long atomic64_read(const atomic64_t *v);
57628 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
57629 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
57630 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
57631
57632 +#define atomic64_read_unchecked(v) atomic64_read(v)
57633 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
57634 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
57635 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
57636 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
57637 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
57638 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
57639 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
57640 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
57641 +
57642 #endif /* _ASM_GENERIC_ATOMIC64_H */
57643 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
57644 index 1bfcfe5..e04c5c9 100644
57645 --- a/include/asm-generic/cache.h
57646 +++ b/include/asm-generic/cache.h
57647 @@ -6,7 +6,7 @@
57648 * cache lines need to provide their own cache.h.
57649 */
57650
57651 -#define L1_CACHE_SHIFT 5
57652 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57653 +#define L1_CACHE_SHIFT 5UL
57654 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57655
57656 #endif /* __ASM_GENERIC_CACHE_H */
57657 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
57658 index 1ca3efc..e3dc852 100644
57659 --- a/include/asm-generic/int-l64.h
57660 +++ b/include/asm-generic/int-l64.h
57661 @@ -46,6 +46,8 @@ typedef unsigned int u32;
57662 typedef signed long s64;
57663 typedef unsigned long u64;
57664
57665 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57666 +
57667 #define S8_C(x) x
57668 #define U8_C(x) x ## U
57669 #define S16_C(x) x
57670 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
57671 index f394147..b6152b9 100644
57672 --- a/include/asm-generic/int-ll64.h
57673 +++ b/include/asm-generic/int-ll64.h
57674 @@ -51,6 +51,8 @@ typedef unsigned int u32;
57675 typedef signed long long s64;
57676 typedef unsigned long long u64;
57677
57678 +typedef unsigned long long intoverflow_t;
57679 +
57680 #define S8_C(x) x
57681 #define U8_C(x) x ## U
57682 #define S16_C(x) x
57683 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
57684 index 0232ccb..13d9165 100644
57685 --- a/include/asm-generic/kmap_types.h
57686 +++ b/include/asm-generic/kmap_types.h
57687 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57688 KMAP_D(17) KM_NMI,
57689 KMAP_D(18) KM_NMI_PTE,
57690 KMAP_D(19) KM_KDB,
57691 +KMAP_D(20) KM_CLEARPAGE,
57692 /*
57693 * Remember to update debug_kmap_atomic() when adding new kmap types!
57694 */
57695 -KMAP_D(20) KM_TYPE_NR
57696 +KMAP_D(21) KM_TYPE_NR
57697 };
57698
57699 #undef KMAP_D
57700 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
57701 index 725612b..9cc513a 100644
57702 --- a/include/asm-generic/pgtable-nopmd.h
57703 +++ b/include/asm-generic/pgtable-nopmd.h
57704 @@ -1,14 +1,19 @@
57705 #ifndef _PGTABLE_NOPMD_H
57706 #define _PGTABLE_NOPMD_H
57707
57708 -#ifndef __ASSEMBLY__
57709 -
57710 #include <asm-generic/pgtable-nopud.h>
57711
57712 -struct mm_struct;
57713 -
57714 #define __PAGETABLE_PMD_FOLDED
57715
57716 +#define PMD_SHIFT PUD_SHIFT
57717 +#define PTRS_PER_PMD 1
57718 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57719 +#define PMD_MASK (~(PMD_SIZE-1))
57720 +
57721 +#ifndef __ASSEMBLY__
57722 +
57723 +struct mm_struct;
57724 +
57725 /*
57726 * Having the pmd type consist of a pud gets the size right, and allows
57727 * us to conceptually access the pud entry that this pmd is folded into
57728 @@ -16,11 +21,6 @@ struct mm_struct;
57729 */
57730 typedef struct { pud_t pud; } pmd_t;
57731
57732 -#define PMD_SHIFT PUD_SHIFT
57733 -#define PTRS_PER_PMD 1
57734 -#define PMD_SIZE (1UL << PMD_SHIFT)
57735 -#define PMD_MASK (~(PMD_SIZE-1))
57736 -
57737 /*
57738 * The "pud_xxx()" functions here are trivial for a folded two-level
57739 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57740 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
57741 index 810431d..ccc3638 100644
57742 --- a/include/asm-generic/pgtable-nopud.h
57743 +++ b/include/asm-generic/pgtable-nopud.h
57744 @@ -1,10 +1,15 @@
57745 #ifndef _PGTABLE_NOPUD_H
57746 #define _PGTABLE_NOPUD_H
57747
57748 -#ifndef __ASSEMBLY__
57749 -
57750 #define __PAGETABLE_PUD_FOLDED
57751
57752 +#define PUD_SHIFT PGDIR_SHIFT
57753 +#define PTRS_PER_PUD 1
57754 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57755 +#define PUD_MASK (~(PUD_SIZE-1))
57756 +
57757 +#ifndef __ASSEMBLY__
57758 +
57759 /*
57760 * Having the pud type consist of a pgd gets the size right, and allows
57761 * us to conceptually access the pgd entry that this pud is folded into
57762 @@ -12,11 +17,6 @@
57763 */
57764 typedef struct { pgd_t pgd; } pud_t;
57765
57766 -#define PUD_SHIFT PGDIR_SHIFT
57767 -#define PTRS_PER_PUD 1
57768 -#define PUD_SIZE (1UL << PUD_SHIFT)
57769 -#define PUD_MASK (~(PUD_SIZE-1))
57770 -
57771 /*
57772 * The "pgd_xxx()" functions here are trivial for a folded two-level
57773 * setup: the pud is never bad, and a pud always exists (as it's folded
57774 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
57775 index 76bff2b..c7a14e2 100644
57776 --- a/include/asm-generic/pgtable.h
57777 +++ b/include/asm-generic/pgtable.h
57778 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57779 #endif /* __HAVE_ARCH_PMD_WRITE */
57780 #endif
57781
57782 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57783 +static inline unsigned long pax_open_kernel(void) { return 0; }
57784 +#endif
57785 +
57786 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57787 +static inline unsigned long pax_close_kernel(void) { return 0; }
57788 +#endif
57789 +
57790 #endif /* !__ASSEMBLY__ */
57791
57792 #endif /* _ASM_GENERIC_PGTABLE_H */
57793 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
57794 index b5e2e4c..6a5373e 100644
57795 --- a/include/asm-generic/vmlinux.lds.h
57796 +++ b/include/asm-generic/vmlinux.lds.h
57797 @@ -217,6 +217,7 @@
57798 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57799 VMLINUX_SYMBOL(__start_rodata) = .; \
57800 *(.rodata) *(.rodata.*) \
57801 + *(.data..read_only) \
57802 *(__vermagic) /* Kernel version magic */ \
57803 . = ALIGN(8); \
57804 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57805 @@ -722,17 +723,18 @@
57806 * section in the linker script will go there too. @phdr should have
57807 * a leading colon.
57808 *
57809 - * Note that this macros defines __per_cpu_load as an absolute symbol.
57810 + * Note that this macros defines per_cpu_load as an absolute symbol.
57811 * If there is no need to put the percpu section at a predetermined
57812 * address, use PERCPU_SECTION.
57813 */
57814 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57815 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
57816 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57817 + per_cpu_load = .; \
57818 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57819 - LOAD_OFFSET) { \
57820 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57821 PERCPU_INPUT(cacheline) \
57822 } phdr \
57823 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57824 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57825
57826 /**
57827 * PERCPU_SECTION - define output section for percpu area, simple version
57828 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
57829 index bf4b2dc..2d0762f 100644
57830 --- a/include/drm/drmP.h
57831 +++ b/include/drm/drmP.h
57832 @@ -72,6 +72,7 @@
57833 #include <linux/workqueue.h>
57834 #include <linux/poll.h>
57835 #include <asm/pgalloc.h>
57836 +#include <asm/local.h>
57837 #include "drm.h"
57838
57839 #include <linux/idr.h>
57840 @@ -1038,7 +1039,7 @@ struct drm_device {
57841
57842 /** \name Usage Counters */
57843 /*@{ */
57844 - int open_count; /**< Outstanding files open */
57845 + local_t open_count; /**< Outstanding files open */
57846 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57847 atomic_t vma_count; /**< Outstanding vma areas open */
57848 int buf_use; /**< Buffers in use -- cannot alloc */
57849 @@ -1049,7 +1050,7 @@ struct drm_device {
57850 /*@{ */
57851 unsigned long counters;
57852 enum drm_stat_type types[15];
57853 - atomic_t counts[15];
57854 + atomic_unchecked_t counts[15];
57855 /*@} */
57856
57857 struct list_head filelist;
57858 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
57859 index 73b0712..0b7ef2f 100644
57860 --- a/include/drm/drm_crtc_helper.h
57861 +++ b/include/drm/drm_crtc_helper.h
57862 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57863
57864 /* disable crtc when not in use - more explicit than dpms off */
57865 void (*disable)(struct drm_crtc *crtc);
57866 -};
57867 +} __no_const;
57868
57869 struct drm_encoder_helper_funcs {
57870 void (*dpms)(struct drm_encoder *encoder, int mode);
57871 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57872 struct drm_connector *connector);
57873 /* disable encoder when not in use - more explicit than dpms off */
57874 void (*disable)(struct drm_encoder *encoder);
57875 -};
57876 +} __no_const;
57877
57878 struct drm_connector_helper_funcs {
57879 int (*get_modes)(struct drm_connector *connector);
57880 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
57881 index 26c1f78..6722682 100644
57882 --- a/include/drm/ttm/ttm_memory.h
57883 +++ b/include/drm/ttm/ttm_memory.h
57884 @@ -47,7 +47,7 @@
57885
57886 struct ttm_mem_shrink {
57887 int (*do_shrink) (struct ttm_mem_shrink *);
57888 -};
57889 +} __no_const;
57890
57891 /**
57892 * struct ttm_mem_global - Global memory accounting structure.
57893 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
57894 index e86dfca..40cc55f 100644
57895 --- a/include/linux/a.out.h
57896 +++ b/include/linux/a.out.h
57897 @@ -39,6 +39,14 @@ enum machine_type {
57898 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57899 };
57900
57901 +/* Constants for the N_FLAGS field */
57902 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57903 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57904 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57905 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57906 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57907 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57908 +
57909 #if !defined (N_MAGIC)
57910 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57911 #endif
57912 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
57913 index 49a83ca..df96b54 100644
57914 --- a/include/linux/atmdev.h
57915 +++ b/include/linux/atmdev.h
57916 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57917 #endif
57918
57919 struct k_atm_aal_stats {
57920 -#define __HANDLE_ITEM(i) atomic_t i
57921 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57922 __AAL_STAT_ITEMS
57923 #undef __HANDLE_ITEM
57924 };
57925 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
57926 index fd88a39..8a801b4 100644
57927 --- a/include/linux/binfmts.h
57928 +++ b/include/linux/binfmts.h
57929 @@ -18,7 +18,7 @@ struct pt_regs;
57930 #define BINPRM_BUF_SIZE 128
57931
57932 #ifdef __KERNEL__
57933 -#include <linux/list.h>
57934 +#include <linux/sched.h>
57935
57936 #define CORENAME_MAX_SIZE 128
57937
57938 @@ -58,6 +58,7 @@ struct linux_binprm {
57939 unsigned interp_flags;
57940 unsigned interp_data;
57941 unsigned long loader, exec;
57942 + char tcomm[TASK_COMM_LEN];
57943 };
57944
57945 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
57946 @@ -88,6 +89,7 @@ struct linux_binfmt {
57947 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57948 int (*load_shlib)(struct file *);
57949 int (*core_dump)(struct coredump_params *cprm);
57950 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57951 unsigned long min_coredump; /* minimal dump size */
57952 };
57953
57954 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
57955 index 0ed1eb0..3ab569b 100644
57956 --- a/include/linux/blkdev.h
57957 +++ b/include/linux/blkdev.h
57958 @@ -1315,7 +1315,7 @@ struct block_device_operations {
57959 /* this callback is with swap_lock and sometimes page table lock held */
57960 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57961 struct module *owner;
57962 -};
57963 +} __do_const;
57964
57965 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57966 unsigned long);
57967 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
57968 index 4d1a074..88f929a 100644
57969 --- a/include/linux/blktrace_api.h
57970 +++ b/include/linux/blktrace_api.h
57971 @@ -162,7 +162,7 @@ struct blk_trace {
57972 struct dentry *dir;
57973 struct dentry *dropped_file;
57974 struct dentry *msg_file;
57975 - atomic_t dropped;
57976 + atomic_unchecked_t dropped;
57977 };
57978
57979 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57980 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
57981 index 83195fb..0b0f77d 100644
57982 --- a/include/linux/byteorder/little_endian.h
57983 +++ b/include/linux/byteorder/little_endian.h
57984 @@ -42,51 +42,51 @@
57985
57986 static inline __le64 __cpu_to_le64p(const __u64 *p)
57987 {
57988 - return (__force __le64)*p;
57989 + return (__force const __le64)*p;
57990 }
57991 static inline __u64 __le64_to_cpup(const __le64 *p)
57992 {
57993 - return (__force __u64)*p;
57994 + return (__force const __u64)*p;
57995 }
57996 static inline __le32 __cpu_to_le32p(const __u32 *p)
57997 {
57998 - return (__force __le32)*p;
57999 + return (__force const __le32)*p;
58000 }
58001 static inline __u32 __le32_to_cpup(const __le32 *p)
58002 {
58003 - return (__force __u32)*p;
58004 + return (__force const __u32)*p;
58005 }
58006 static inline __le16 __cpu_to_le16p(const __u16 *p)
58007 {
58008 - return (__force __le16)*p;
58009 + return (__force const __le16)*p;
58010 }
58011 static inline __u16 __le16_to_cpup(const __le16 *p)
58012 {
58013 - return (__force __u16)*p;
58014 + return (__force const __u16)*p;
58015 }
58016 static inline __be64 __cpu_to_be64p(const __u64 *p)
58017 {
58018 - return (__force __be64)__swab64p(p);
58019 + return (__force const __be64)__swab64p(p);
58020 }
58021 static inline __u64 __be64_to_cpup(const __be64 *p)
58022 {
58023 - return __swab64p((__u64 *)p);
58024 + return __swab64p((const __u64 *)p);
58025 }
58026 static inline __be32 __cpu_to_be32p(const __u32 *p)
58027 {
58028 - return (__force __be32)__swab32p(p);
58029 + return (__force const __be32)__swab32p(p);
58030 }
58031 static inline __u32 __be32_to_cpup(const __be32 *p)
58032 {
58033 - return __swab32p((__u32 *)p);
58034 + return __swab32p((const __u32 *)p);
58035 }
58036 static inline __be16 __cpu_to_be16p(const __u16 *p)
58037 {
58038 - return (__force __be16)__swab16p(p);
58039 + return (__force const __be16)__swab16p(p);
58040 }
58041 static inline __u16 __be16_to_cpup(const __be16 *p)
58042 {
58043 - return __swab16p((__u16 *)p);
58044 + return __swab16p((const __u16 *)p);
58045 }
58046 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
58047 #define __le64_to_cpus(x) do { (void)(x); } while (0)
58048 diff --git a/include/linux/cache.h b/include/linux/cache.h
58049 index 4c57065..4307975 100644
58050 --- a/include/linux/cache.h
58051 +++ b/include/linux/cache.h
58052 @@ -16,6 +16,10 @@
58053 #define __read_mostly
58054 #endif
58055
58056 +#ifndef __read_only
58057 +#define __read_only __read_mostly
58058 +#endif
58059 +
58060 #ifndef ____cacheline_aligned
58061 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
58062 #endif
58063 diff --git a/include/linux/capability.h b/include/linux/capability.h
58064 index a63d13d..069bfd5 100644
58065 --- a/include/linux/capability.h
58066 +++ b/include/linux/capability.h
58067 @@ -548,6 +548,9 @@ extern bool capable(int cap);
58068 extern bool ns_capable(struct user_namespace *ns, int cap);
58069 extern bool task_ns_capable(struct task_struct *t, int cap);
58070 extern bool nsown_capable(int cap);
58071 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
58072 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
58073 +extern bool capable_nolog(int cap);
58074
58075 /* audit system wants to get cap info from files as well */
58076 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
58077 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
58078 index 04ffb2e..6799180 100644
58079 --- a/include/linux/cleancache.h
58080 +++ b/include/linux/cleancache.h
58081 @@ -31,7 +31,7 @@ struct cleancache_ops {
58082 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
58083 void (*flush_inode)(int, struct cleancache_filekey);
58084 void (*flush_fs)(int);
58085 -};
58086 +} __no_const;
58087
58088 extern struct cleancache_ops
58089 cleancache_register_ops(struct cleancache_ops *ops);
58090 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
58091 index dfadc96..c0e70c1 100644
58092 --- a/include/linux/compiler-gcc4.h
58093 +++ b/include/linux/compiler-gcc4.h
58094 @@ -31,6 +31,12 @@
58095
58096
58097 #if __GNUC_MINOR__ >= 5
58098 +
58099 +#ifdef CONSTIFY_PLUGIN
58100 +#define __no_const __attribute__((no_const))
58101 +#define __do_const __attribute__((do_const))
58102 +#endif
58103 +
58104 /*
58105 * Mark a position in code as unreachable. This can be used to
58106 * suppress control flow warnings after asm blocks that transfer
58107 @@ -46,6 +52,11 @@
58108 #define __noclone __attribute__((__noclone__))
58109
58110 #endif
58111 +
58112 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
58113 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
58114 +#define __bos0(ptr) __bos((ptr), 0)
58115 +#define __bos1(ptr) __bos((ptr), 1)
58116 #endif
58117
58118 #if __GNUC_MINOR__ > 0
58119 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
58120 index 320d6c9..8573a1c 100644
58121 --- a/include/linux/compiler.h
58122 +++ b/include/linux/compiler.h
58123 @@ -5,31 +5,62 @@
58124
58125 #ifdef __CHECKER__
58126 # define __user __attribute__((noderef, address_space(1)))
58127 +# define __force_user __force __user
58128 # define __kernel __attribute__((address_space(0)))
58129 +# define __force_kernel __force __kernel
58130 # define __safe __attribute__((safe))
58131 # define __force __attribute__((force))
58132 # define __nocast __attribute__((nocast))
58133 # define __iomem __attribute__((noderef, address_space(2)))
58134 +# define __force_iomem __force __iomem
58135 # define __acquires(x) __attribute__((context(x,0,1)))
58136 # define __releases(x) __attribute__((context(x,1,0)))
58137 # define __acquire(x) __context__(x,1)
58138 # define __release(x) __context__(x,-1)
58139 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
58140 # define __percpu __attribute__((noderef, address_space(3)))
58141 +# define __force_percpu __force __percpu
58142 #ifdef CONFIG_SPARSE_RCU_POINTER
58143 # define __rcu __attribute__((noderef, address_space(4)))
58144 +# define __force_rcu __force __rcu
58145 #else
58146 # define __rcu
58147 +# define __force_rcu
58148 #endif
58149 extern void __chk_user_ptr(const volatile void __user *);
58150 extern void __chk_io_ptr(const volatile void __iomem *);
58151 +#elif defined(CHECKER_PLUGIN)
58152 +//# define __user
58153 +//# define __force_user
58154 +//# define __kernel
58155 +//# define __force_kernel
58156 +# define __safe
58157 +# define __force
58158 +# define __nocast
58159 +# define __iomem
58160 +# define __force_iomem
58161 +# define __chk_user_ptr(x) (void)0
58162 +# define __chk_io_ptr(x) (void)0
58163 +# define __builtin_warning(x, y...) (1)
58164 +# define __acquires(x)
58165 +# define __releases(x)
58166 +# define __acquire(x) (void)0
58167 +# define __release(x) (void)0
58168 +# define __cond_lock(x,c) (c)
58169 +# define __percpu
58170 +# define __force_percpu
58171 +# define __rcu
58172 +# define __force_rcu
58173 #else
58174 # define __user
58175 +# define __force_user
58176 # define __kernel
58177 +# define __force_kernel
58178 # define __safe
58179 # define __force
58180 # define __nocast
58181 # define __iomem
58182 +# define __force_iomem
58183 # define __chk_user_ptr(x) (void)0
58184 # define __chk_io_ptr(x) (void)0
58185 # define __builtin_warning(x, y...) (1)
58186 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
58187 # define __release(x) (void)0
58188 # define __cond_lock(x,c) (c)
58189 # define __percpu
58190 +# define __force_percpu
58191 # define __rcu
58192 +# define __force_rcu
58193 #endif
58194
58195 #ifdef __KERNEL__
58196 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58197 # define __attribute_const__ /* unimplemented */
58198 #endif
58199
58200 +#ifndef __no_const
58201 +# define __no_const
58202 +#endif
58203 +
58204 +#ifndef __do_const
58205 +# define __do_const
58206 +#endif
58207 +
58208 /*
58209 * Tell gcc if a function is cold. The compiler will assume any path
58210 * directly leading to the call is unlikely.
58211 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58212 #define __cold
58213 #endif
58214
58215 +#ifndef __alloc_size
58216 +#define __alloc_size(...)
58217 +#endif
58218 +
58219 +#ifndef __bos
58220 +#define __bos(ptr, arg)
58221 +#endif
58222 +
58223 +#ifndef __bos0
58224 +#define __bos0(ptr)
58225 +#endif
58226 +
58227 +#ifndef __bos1
58228 +#define __bos1(ptr)
58229 +#endif
58230 +
58231 /* Simple shorthand for a section definition */
58232 #ifndef __section
58233 # define __section(S) __attribute__ ((__section__(#S)))
58234 @@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58235 * use is to mediate communication between process-level code and irq/NMI
58236 * handlers, all running on the same CPU.
58237 */
58238 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
58239 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
58240 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
58241
58242 #endif /* __LINUX_COMPILER_H */
58243 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
58244 index e9eaec5..bfeb9bb 100644
58245 --- a/include/linux/cpuset.h
58246 +++ b/include/linux/cpuset.h
58247 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
58248 * nodemask.
58249 */
58250 smp_mb();
58251 - --ACCESS_ONCE(current->mems_allowed_change_disable);
58252 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
58253 }
58254
58255 static inline void set_mems_allowed(nodemask_t nodemask)
58256 diff --git a/include/linux/cred.h b/include/linux/cred.h
58257 index 4030896..8d6f342 100644
58258 --- a/include/linux/cred.h
58259 +++ b/include/linux/cred.h
58260 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
58261 static inline void validate_process_creds(void)
58262 {
58263 }
58264 +static inline void validate_task_creds(struct task_struct *task)
58265 +{
58266 +}
58267 #endif
58268
58269 /**
58270 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
58271 index 8a94217..15d49e3 100644
58272 --- a/include/linux/crypto.h
58273 +++ b/include/linux/crypto.h
58274 @@ -365,7 +365,7 @@ struct cipher_tfm {
58275 const u8 *key, unsigned int keylen);
58276 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58277 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58278 -};
58279 +} __no_const;
58280
58281 struct hash_tfm {
58282 int (*init)(struct hash_desc *desc);
58283 @@ -386,13 +386,13 @@ struct compress_tfm {
58284 int (*cot_decompress)(struct crypto_tfm *tfm,
58285 const u8 *src, unsigned int slen,
58286 u8 *dst, unsigned int *dlen);
58287 -};
58288 +} __no_const;
58289
58290 struct rng_tfm {
58291 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
58292 unsigned int dlen);
58293 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
58294 -};
58295 +} __no_const;
58296
58297 #define crt_ablkcipher crt_u.ablkcipher
58298 #define crt_aead crt_u.aead
58299 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
58300 index 7925bf0..d5143d2 100644
58301 --- a/include/linux/decompress/mm.h
58302 +++ b/include/linux/decompress/mm.h
58303 @@ -77,7 +77,7 @@ static void free(void *where)
58304 * warnings when not needed (indeed large_malloc / large_free are not
58305 * needed by inflate */
58306
58307 -#define malloc(a) kmalloc(a, GFP_KERNEL)
58308 +#define malloc(a) kmalloc((a), GFP_KERNEL)
58309 #define free(a) kfree(a)
58310
58311 #define large_malloc(a) vmalloc(a)
58312 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
58313 index e13117c..e9fc938 100644
58314 --- a/include/linux/dma-mapping.h
58315 +++ b/include/linux/dma-mapping.h
58316 @@ -46,7 +46,7 @@ struct dma_map_ops {
58317 u64 (*get_required_mask)(struct device *dev);
58318 #endif
58319 int is_phys;
58320 -};
58321 +} __do_const;
58322
58323 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
58324
58325 diff --git a/include/linux/efi.h b/include/linux/efi.h
58326 index 2362a0b..cfaf8fcc 100644
58327 --- a/include/linux/efi.h
58328 +++ b/include/linux/efi.h
58329 @@ -446,7 +446,7 @@ struct efivar_operations {
58330 efi_get_variable_t *get_variable;
58331 efi_get_next_variable_t *get_next_variable;
58332 efi_set_variable_t *set_variable;
58333 -};
58334 +} __no_const;
58335
58336 struct efivars {
58337 /*
58338 diff --git a/include/linux/elf.h b/include/linux/elf.h
58339 index 31f0508..5421c01 100644
58340 --- a/include/linux/elf.h
58341 +++ b/include/linux/elf.h
58342 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
58343 #define PT_GNU_EH_FRAME 0x6474e550
58344
58345 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
58346 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
58347 +
58348 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
58349 +
58350 +/* Constants for the e_flags field */
58351 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
58352 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
58353 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
58354 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
58355 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
58356 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
58357
58358 /*
58359 * Extended Numbering
58360 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
58361 #define DT_DEBUG 21
58362 #define DT_TEXTREL 22
58363 #define DT_JMPREL 23
58364 +#define DT_FLAGS 30
58365 + #define DF_TEXTREL 0x00000004
58366 #define DT_ENCODING 32
58367 #define OLD_DT_LOOS 0x60000000
58368 #define DT_LOOS 0x6000000d
58369 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
58370 #define PF_W 0x2
58371 #define PF_X 0x1
58372
58373 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
58374 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
58375 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
58376 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
58377 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
58378 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
58379 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58380 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58381 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58382 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58383 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58384 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58385 +
58386 typedef struct elf32_phdr{
58387 Elf32_Word p_type;
58388 Elf32_Off p_offset;
58389 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
58390 #define EI_OSABI 7
58391 #define EI_PAD 8
58392
58393 +#define EI_PAX 14
58394 +
58395 #define ELFMAG0 0x7f /* EI_MAG */
58396 #define ELFMAG1 'E'
58397 #define ELFMAG2 'L'
58398 @@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
58399 #define elf_note elf32_note
58400 #define elf_addr_t Elf32_Off
58401 #define Elf_Half Elf32_Half
58402 +#define elf_dyn Elf32_Dyn
58403
58404 #else
58405
58406 @@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
58407 #define elf_note elf64_note
58408 #define elf_addr_t Elf64_Off
58409 #define Elf_Half Elf64_Half
58410 +#define elf_dyn Elf64_Dyn
58411
58412 #endif
58413
58414 diff --git a/include/linux/filter.h b/include/linux/filter.h
58415 index 8eeb205..d59bfa2 100644
58416 --- a/include/linux/filter.h
58417 +++ b/include/linux/filter.h
58418 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
58419
58420 struct sk_buff;
58421 struct sock;
58422 +struct bpf_jit_work;
58423
58424 struct sk_filter
58425 {
58426 @@ -141,6 +142,9 @@ struct sk_filter
58427 unsigned int len; /* Number of filter blocks */
58428 unsigned int (*bpf_func)(const struct sk_buff *skb,
58429 const struct sock_filter *filter);
58430 +#ifdef CONFIG_BPF_JIT
58431 + struct bpf_jit_work *work;
58432 +#endif
58433 struct rcu_head rcu;
58434 struct sock_filter insns[0];
58435 };
58436 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
58437 index 84ccf8e..2e9b14c 100644
58438 --- a/include/linux/firewire.h
58439 +++ b/include/linux/firewire.h
58440 @@ -428,7 +428,7 @@ struct fw_iso_context {
58441 union {
58442 fw_iso_callback_t sc;
58443 fw_iso_mc_callback_t mc;
58444 - } callback;
58445 + } __no_const callback;
58446 void *callback_data;
58447 };
58448
58449 diff --git a/include/linux/fs.h b/include/linux/fs.h
58450 index e0bc4ff..d79c2fa 100644
58451 --- a/include/linux/fs.h
58452 +++ b/include/linux/fs.h
58453 @@ -1608,7 +1608,8 @@ struct file_operations {
58454 int (*setlease)(struct file *, long, struct file_lock **);
58455 long (*fallocate)(struct file *file, int mode, loff_t offset,
58456 loff_t len);
58457 -};
58458 +} __do_const;
58459 +typedef struct file_operations __no_const file_operations_no_const;
58460
58461 struct inode_operations {
58462 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
58463 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
58464 index 003dc0f..3c4ea97 100644
58465 --- a/include/linux/fs_struct.h
58466 +++ b/include/linux/fs_struct.h
58467 @@ -6,7 +6,7 @@
58468 #include <linux/seqlock.h>
58469
58470 struct fs_struct {
58471 - int users;
58472 + atomic_t users;
58473 spinlock_t lock;
58474 seqcount_t seq;
58475 int umask;
58476 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
58477 index ce31408..b1ad003 100644
58478 --- a/include/linux/fscache-cache.h
58479 +++ b/include/linux/fscache-cache.h
58480 @@ -102,7 +102,7 @@ struct fscache_operation {
58481 fscache_operation_release_t release;
58482 };
58483
58484 -extern atomic_t fscache_op_debug_id;
58485 +extern atomic_unchecked_t fscache_op_debug_id;
58486 extern void fscache_op_work_func(struct work_struct *work);
58487
58488 extern void fscache_enqueue_operation(struct fscache_operation *);
58489 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
58490 {
58491 INIT_WORK(&op->work, fscache_op_work_func);
58492 atomic_set(&op->usage, 1);
58493 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58494 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58495 op->processor = processor;
58496 op->release = release;
58497 INIT_LIST_HEAD(&op->pend_link);
58498 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
58499 index 2a53f10..0187fdf 100644
58500 --- a/include/linux/fsnotify.h
58501 +++ b/include/linux/fsnotify.h
58502 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
58503 */
58504 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58505 {
58506 - return kstrdup(name, GFP_KERNEL);
58507 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58508 }
58509
58510 /*
58511 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
58512 index 91d0e0a3..035666b 100644
58513 --- a/include/linux/fsnotify_backend.h
58514 +++ b/include/linux/fsnotify_backend.h
58515 @@ -105,6 +105,7 @@ struct fsnotify_ops {
58516 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
58517 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
58518 };
58519 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
58520
58521 /*
58522 * A group is a "thing" that wants to receive notification about filesystem
58523 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
58524 index c3da42d..c70e0df 100644
58525 --- a/include/linux/ftrace_event.h
58526 +++ b/include/linux/ftrace_event.h
58527 @@ -97,7 +97,7 @@ struct trace_event_functions {
58528 trace_print_func raw;
58529 trace_print_func hex;
58530 trace_print_func binary;
58531 -};
58532 +} __no_const;
58533
58534 struct trace_event {
58535 struct hlist_node node;
58536 @@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
58537 extern int trace_add_event_call(struct ftrace_event_call *call);
58538 extern void trace_remove_event_call(struct ftrace_event_call *call);
58539
58540 -#define is_signed_type(type) (((type)(-1)) < 0)
58541 +#define is_signed_type(type) (((type)(-1)) < (type)1)
58542
58543 int trace_set_clr_event(const char *system, const char *event, int set);
58544
58545 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
58546 index 6d18f35..ab71e2c 100644
58547 --- a/include/linux/genhd.h
58548 +++ b/include/linux/genhd.h
58549 @@ -185,7 +185,7 @@ struct gendisk {
58550 struct kobject *slave_dir;
58551
58552 struct timer_rand_state *random;
58553 - atomic_t sync_io; /* RAID */
58554 + atomic_unchecked_t sync_io; /* RAID */
58555 struct disk_events *ev;
58556 #ifdef CONFIG_BLK_DEV_INTEGRITY
58557 struct blk_integrity *integrity;
58558 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
58559 new file mode 100644
58560 index 0000000..8a130b6
58561 --- /dev/null
58562 +++ b/include/linux/gracl.h
58563 @@ -0,0 +1,319 @@
58564 +#ifndef GR_ACL_H
58565 +#define GR_ACL_H
58566 +
58567 +#include <linux/grdefs.h>
58568 +#include <linux/resource.h>
58569 +#include <linux/capability.h>
58570 +#include <linux/dcache.h>
58571 +#include <asm/resource.h>
58572 +
58573 +/* Major status information */
58574 +
58575 +#define GR_VERSION "grsecurity 2.9"
58576 +#define GRSECURITY_VERSION 0x2900
58577 +
58578 +enum {
58579 + GR_SHUTDOWN = 0,
58580 + GR_ENABLE = 1,
58581 + GR_SPROLE = 2,
58582 + GR_RELOAD = 3,
58583 + GR_SEGVMOD = 4,
58584 + GR_STATUS = 5,
58585 + GR_UNSPROLE = 6,
58586 + GR_PASSSET = 7,
58587 + GR_SPROLEPAM = 8,
58588 +};
58589 +
58590 +/* Password setup definitions
58591 + * kernel/grhash.c */
58592 +enum {
58593 + GR_PW_LEN = 128,
58594 + GR_SALT_LEN = 16,
58595 + GR_SHA_LEN = 32,
58596 +};
58597 +
58598 +enum {
58599 + GR_SPROLE_LEN = 64,
58600 +};
58601 +
58602 +enum {
58603 + GR_NO_GLOB = 0,
58604 + GR_REG_GLOB,
58605 + GR_CREATE_GLOB
58606 +};
58607 +
58608 +#define GR_NLIMITS 32
58609 +
58610 +/* Begin Data Structures */
58611 +
58612 +struct sprole_pw {
58613 + unsigned char *rolename;
58614 + unsigned char salt[GR_SALT_LEN];
58615 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58616 +};
58617 +
58618 +struct name_entry {
58619 + __u32 key;
58620 + ino_t inode;
58621 + dev_t device;
58622 + char *name;
58623 + __u16 len;
58624 + __u8 deleted;
58625 + struct name_entry *prev;
58626 + struct name_entry *next;
58627 +};
58628 +
58629 +struct inodev_entry {
58630 + struct name_entry *nentry;
58631 + struct inodev_entry *prev;
58632 + struct inodev_entry *next;
58633 +};
58634 +
58635 +struct acl_role_db {
58636 + struct acl_role_label **r_hash;
58637 + __u32 r_size;
58638 +};
58639 +
58640 +struct inodev_db {
58641 + struct inodev_entry **i_hash;
58642 + __u32 i_size;
58643 +};
58644 +
58645 +struct name_db {
58646 + struct name_entry **n_hash;
58647 + __u32 n_size;
58648 +};
58649 +
58650 +struct crash_uid {
58651 + uid_t uid;
58652 + unsigned long expires;
58653 +};
58654 +
58655 +struct gr_hash_struct {
58656 + void **table;
58657 + void **nametable;
58658 + void *first;
58659 + __u32 table_size;
58660 + __u32 used_size;
58661 + int type;
58662 +};
58663 +
58664 +/* Userspace Grsecurity ACL data structures */
58665 +
58666 +struct acl_subject_label {
58667 + char *filename;
58668 + ino_t inode;
58669 + dev_t device;
58670 + __u32 mode;
58671 + kernel_cap_t cap_mask;
58672 + kernel_cap_t cap_lower;
58673 + kernel_cap_t cap_invert_audit;
58674 +
58675 + struct rlimit res[GR_NLIMITS];
58676 + __u32 resmask;
58677 +
58678 + __u8 user_trans_type;
58679 + __u8 group_trans_type;
58680 + uid_t *user_transitions;
58681 + gid_t *group_transitions;
58682 + __u16 user_trans_num;
58683 + __u16 group_trans_num;
58684 +
58685 + __u32 sock_families[2];
58686 + __u32 ip_proto[8];
58687 + __u32 ip_type;
58688 + struct acl_ip_label **ips;
58689 + __u32 ip_num;
58690 + __u32 inaddr_any_override;
58691 +
58692 + __u32 crashes;
58693 + unsigned long expires;
58694 +
58695 + struct acl_subject_label *parent_subject;
58696 + struct gr_hash_struct *hash;
58697 + struct acl_subject_label *prev;
58698 + struct acl_subject_label *next;
58699 +
58700 + struct acl_object_label **obj_hash;
58701 + __u32 obj_hash_size;
58702 + __u16 pax_flags;
58703 +};
58704 +
58705 +struct role_allowed_ip {
58706 + __u32 addr;
58707 + __u32 netmask;
58708 +
58709 + struct role_allowed_ip *prev;
58710 + struct role_allowed_ip *next;
58711 +};
58712 +
58713 +struct role_transition {
58714 + char *rolename;
58715 +
58716 + struct role_transition *prev;
58717 + struct role_transition *next;
58718 +};
58719 +
58720 +struct acl_role_label {
58721 + char *rolename;
58722 + uid_t uidgid;
58723 + __u16 roletype;
58724 +
58725 + __u16 auth_attempts;
58726 + unsigned long expires;
58727 +
58728 + struct acl_subject_label *root_label;
58729 + struct gr_hash_struct *hash;
58730 +
58731 + struct acl_role_label *prev;
58732 + struct acl_role_label *next;
58733 +
58734 + struct role_transition *transitions;
58735 + struct role_allowed_ip *allowed_ips;
58736 + uid_t *domain_children;
58737 + __u16 domain_child_num;
58738 +
58739 + umode_t umask;
58740 +
58741 + struct acl_subject_label **subj_hash;
58742 + __u32 subj_hash_size;
58743 +};
58744 +
58745 +struct user_acl_role_db {
58746 + struct acl_role_label **r_table;
58747 + __u32 num_pointers; /* Number of allocations to track */
58748 + __u32 num_roles; /* Number of roles */
58749 + __u32 num_domain_children; /* Number of domain children */
58750 + __u32 num_subjects; /* Number of subjects */
58751 + __u32 num_objects; /* Number of objects */
58752 +};
58753 +
58754 +struct acl_object_label {
58755 + char *filename;
58756 + ino_t inode;
58757 + dev_t device;
58758 + __u32 mode;
58759 +
58760 + struct acl_subject_label *nested;
58761 + struct acl_object_label *globbed;
58762 +
58763 + /* next two structures not used */
58764 +
58765 + struct acl_object_label *prev;
58766 + struct acl_object_label *next;
58767 +};
58768 +
58769 +struct acl_ip_label {
58770 + char *iface;
58771 + __u32 addr;
58772 + __u32 netmask;
58773 + __u16 low, high;
58774 + __u8 mode;
58775 + __u32 type;
58776 + __u32 proto[8];
58777 +
58778 + /* next two structures not used */
58779 +
58780 + struct acl_ip_label *prev;
58781 + struct acl_ip_label *next;
58782 +};
58783 +
58784 +struct gr_arg {
58785 + struct user_acl_role_db role_db;
58786 + unsigned char pw[GR_PW_LEN];
58787 + unsigned char salt[GR_SALT_LEN];
58788 + unsigned char sum[GR_SHA_LEN];
58789 + unsigned char sp_role[GR_SPROLE_LEN];
58790 + struct sprole_pw *sprole_pws;
58791 + dev_t segv_device;
58792 + ino_t segv_inode;
58793 + uid_t segv_uid;
58794 + __u16 num_sprole_pws;
58795 + __u16 mode;
58796 +};
58797 +
58798 +struct gr_arg_wrapper {
58799 + struct gr_arg *arg;
58800 + __u32 version;
58801 + __u32 size;
58802 +};
58803 +
58804 +struct subject_map {
58805 + struct acl_subject_label *user;
58806 + struct acl_subject_label *kernel;
58807 + struct subject_map *prev;
58808 + struct subject_map *next;
58809 +};
58810 +
58811 +struct acl_subj_map_db {
58812 + struct subject_map **s_hash;
58813 + __u32 s_size;
58814 +};
58815 +
58816 +/* End Data Structures Section */
58817 +
58818 +/* Hash functions generated by empirical testing by Brad Spengler
58819 + Makes good use of the low bits of the inode. Generally 0-1 times
58820 + in loop for successful match. 0-3 for unsuccessful match.
58821 + Shift/add algorithm with modulus of table size and an XOR*/
58822 +
58823 +static __inline__ unsigned int
58824 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58825 +{
58826 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
58827 +}
58828 +
58829 + static __inline__ unsigned int
58830 +shash(const struct acl_subject_label *userp, const unsigned int sz)
58831 +{
58832 + return ((const unsigned long)userp % sz);
58833 +}
58834 +
58835 +static __inline__ unsigned int
58836 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58837 +{
58838 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58839 +}
58840 +
58841 +static __inline__ unsigned int
58842 +nhash(const char *name, const __u16 len, const unsigned int sz)
58843 +{
58844 + return full_name_hash((const unsigned char *)name, len) % sz;
58845 +}
58846 +
58847 +#define FOR_EACH_ROLE_START(role) \
58848 + role = role_list; \
58849 + while (role) {
58850 +
58851 +#define FOR_EACH_ROLE_END(role) \
58852 + role = role->prev; \
58853 + }
58854 +
58855 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58856 + subj = NULL; \
58857 + iter = 0; \
58858 + while (iter < role->subj_hash_size) { \
58859 + if (subj == NULL) \
58860 + subj = role->subj_hash[iter]; \
58861 + if (subj == NULL) { \
58862 + iter++; \
58863 + continue; \
58864 + }
58865 +
58866 +#define FOR_EACH_SUBJECT_END(subj,iter) \
58867 + subj = subj->next; \
58868 + if (subj == NULL) \
58869 + iter++; \
58870 + }
58871 +
58872 +
58873 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58874 + subj = role->hash->first; \
58875 + while (subj != NULL) {
58876 +
58877 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58878 + subj = subj->next; \
58879 + }
58880 +
58881 +#endif
58882 +
58883 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
58884 new file mode 100644
58885 index 0000000..323ecf2
58886 --- /dev/null
58887 +++ b/include/linux/gralloc.h
58888 @@ -0,0 +1,9 @@
58889 +#ifndef __GRALLOC_H
58890 +#define __GRALLOC_H
58891 +
58892 +void acl_free_all(void);
58893 +int acl_alloc_stack_init(unsigned long size);
58894 +void *acl_alloc(unsigned long len);
58895 +void *acl_alloc_num(unsigned long num, unsigned long len);
58896 +
58897 +#endif
58898 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
58899 new file mode 100644
58900 index 0000000..b30e9bc
58901 --- /dev/null
58902 +++ b/include/linux/grdefs.h
58903 @@ -0,0 +1,140 @@
58904 +#ifndef GRDEFS_H
58905 +#define GRDEFS_H
58906 +
58907 +/* Begin grsecurity status declarations */
58908 +
58909 +enum {
58910 + GR_READY = 0x01,
58911 + GR_STATUS_INIT = 0x00 // disabled state
58912 +};
58913 +
58914 +/* Begin ACL declarations */
58915 +
58916 +/* Role flags */
58917 +
58918 +enum {
58919 + GR_ROLE_USER = 0x0001,
58920 + GR_ROLE_GROUP = 0x0002,
58921 + GR_ROLE_DEFAULT = 0x0004,
58922 + GR_ROLE_SPECIAL = 0x0008,
58923 + GR_ROLE_AUTH = 0x0010,
58924 + GR_ROLE_NOPW = 0x0020,
58925 + GR_ROLE_GOD = 0x0040,
58926 + GR_ROLE_LEARN = 0x0080,
58927 + GR_ROLE_TPE = 0x0100,
58928 + GR_ROLE_DOMAIN = 0x0200,
58929 + GR_ROLE_PAM = 0x0400,
58930 + GR_ROLE_PERSIST = 0x0800
58931 +};
58932 +
58933 +/* ACL Subject and Object mode flags */
58934 +enum {
58935 + GR_DELETED = 0x80000000
58936 +};
58937 +
58938 +/* ACL Object-only mode flags */
58939 +enum {
58940 + GR_READ = 0x00000001,
58941 + GR_APPEND = 0x00000002,
58942 + GR_WRITE = 0x00000004,
58943 + GR_EXEC = 0x00000008,
58944 + GR_FIND = 0x00000010,
58945 + GR_INHERIT = 0x00000020,
58946 + GR_SETID = 0x00000040,
58947 + GR_CREATE = 0x00000080,
58948 + GR_DELETE = 0x00000100,
58949 + GR_LINK = 0x00000200,
58950 + GR_AUDIT_READ = 0x00000400,
58951 + GR_AUDIT_APPEND = 0x00000800,
58952 + GR_AUDIT_WRITE = 0x00001000,
58953 + GR_AUDIT_EXEC = 0x00002000,
58954 + GR_AUDIT_FIND = 0x00004000,
58955 + GR_AUDIT_INHERIT= 0x00008000,
58956 + GR_AUDIT_SETID = 0x00010000,
58957 + GR_AUDIT_CREATE = 0x00020000,
58958 + GR_AUDIT_DELETE = 0x00040000,
58959 + GR_AUDIT_LINK = 0x00080000,
58960 + GR_PTRACERD = 0x00100000,
58961 + GR_NOPTRACE = 0x00200000,
58962 + GR_SUPPRESS = 0x00400000,
58963 + GR_NOLEARN = 0x00800000,
58964 + GR_INIT_TRANSFER= 0x01000000
58965 +};
58966 +
58967 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58968 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58969 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58970 +
58971 +/* ACL subject-only mode flags */
58972 +enum {
58973 + GR_KILL = 0x00000001,
58974 + GR_VIEW = 0x00000002,
58975 + GR_PROTECTED = 0x00000004,
58976 + GR_LEARN = 0x00000008,
58977 + GR_OVERRIDE = 0x00000010,
58978 + /* just a placeholder, this mode is only used in userspace */
58979 + GR_DUMMY = 0x00000020,
58980 + GR_PROTSHM = 0x00000040,
58981 + GR_KILLPROC = 0x00000080,
58982 + GR_KILLIPPROC = 0x00000100,
58983 + /* just a placeholder, this mode is only used in userspace */
58984 + GR_NOTROJAN = 0x00000200,
58985 + GR_PROTPROCFD = 0x00000400,
58986 + GR_PROCACCT = 0x00000800,
58987 + GR_RELAXPTRACE = 0x00001000,
58988 + GR_NESTED = 0x00002000,
58989 + GR_INHERITLEARN = 0x00004000,
58990 + GR_PROCFIND = 0x00008000,
58991 + GR_POVERRIDE = 0x00010000,
58992 + GR_KERNELAUTH = 0x00020000,
58993 + GR_ATSECURE = 0x00040000,
58994 + GR_SHMEXEC = 0x00080000
58995 +};
58996 +
58997 +enum {
58998 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58999 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
59000 + GR_PAX_ENABLE_MPROTECT = 0x0004,
59001 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
59002 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
59003 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
59004 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
59005 + GR_PAX_DISABLE_MPROTECT = 0x0400,
59006 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
59007 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
59008 +};
59009 +
59010 +enum {
59011 + GR_ID_USER = 0x01,
59012 + GR_ID_GROUP = 0x02,
59013 +};
59014 +
59015 +enum {
59016 + GR_ID_ALLOW = 0x01,
59017 + GR_ID_DENY = 0x02,
59018 +};
59019 +
59020 +#define GR_CRASH_RES 31
59021 +#define GR_UIDTABLE_MAX 500
59022 +
59023 +/* begin resource learning section */
59024 +enum {
59025 + GR_RLIM_CPU_BUMP = 60,
59026 + GR_RLIM_FSIZE_BUMP = 50000,
59027 + GR_RLIM_DATA_BUMP = 10000,
59028 + GR_RLIM_STACK_BUMP = 1000,
59029 + GR_RLIM_CORE_BUMP = 10000,
59030 + GR_RLIM_RSS_BUMP = 500000,
59031 + GR_RLIM_NPROC_BUMP = 1,
59032 + GR_RLIM_NOFILE_BUMP = 5,
59033 + GR_RLIM_MEMLOCK_BUMP = 50000,
59034 + GR_RLIM_AS_BUMP = 500000,
59035 + GR_RLIM_LOCKS_BUMP = 2,
59036 + GR_RLIM_SIGPENDING_BUMP = 5,
59037 + GR_RLIM_MSGQUEUE_BUMP = 10000,
59038 + GR_RLIM_NICE_BUMP = 1,
59039 + GR_RLIM_RTPRIO_BUMP = 1,
59040 + GR_RLIM_RTTIME_BUMP = 1000000
59041 +};
59042 +
59043 +#endif
59044 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
59045 new file mode 100644
59046 index 0000000..da390f1
59047 --- /dev/null
59048 +++ b/include/linux/grinternal.h
59049 @@ -0,0 +1,221 @@
59050 +#ifndef __GRINTERNAL_H
59051 +#define __GRINTERNAL_H
59052 +
59053 +#ifdef CONFIG_GRKERNSEC
59054 +
59055 +#include <linux/fs.h>
59056 +#include <linux/mnt_namespace.h>
59057 +#include <linux/nsproxy.h>
59058 +#include <linux/gracl.h>
59059 +#include <linux/grdefs.h>
59060 +#include <linux/grmsg.h>
59061 +
59062 +void gr_add_learn_entry(const char *fmt, ...)
59063 + __attribute__ ((format (printf, 1, 2)));
59064 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
59065 + const struct vfsmount *mnt);
59066 +__u32 gr_check_create(const struct dentry *new_dentry,
59067 + const struct dentry *parent,
59068 + const struct vfsmount *mnt, const __u32 mode);
59069 +int gr_check_protected_task(const struct task_struct *task);
59070 +__u32 to_gr_audit(const __u32 reqmode);
59071 +int gr_set_acls(const int type);
59072 +int gr_apply_subject_to_task(struct task_struct *task);
59073 +int gr_acl_is_enabled(void);
59074 +char gr_roletype_to_char(void);
59075 +
59076 +void gr_handle_alertkill(struct task_struct *task);
59077 +char *gr_to_filename(const struct dentry *dentry,
59078 + const struct vfsmount *mnt);
59079 +char *gr_to_filename1(const struct dentry *dentry,
59080 + const struct vfsmount *mnt);
59081 +char *gr_to_filename2(const struct dentry *dentry,
59082 + const struct vfsmount *mnt);
59083 +char *gr_to_filename3(const struct dentry *dentry,
59084 + const struct vfsmount *mnt);
59085 +
59086 +extern int grsec_enable_ptrace_readexec;
59087 +extern int grsec_enable_harden_ptrace;
59088 +extern int grsec_enable_link;
59089 +extern int grsec_enable_fifo;
59090 +extern int grsec_enable_execve;
59091 +extern int grsec_enable_shm;
59092 +extern int grsec_enable_execlog;
59093 +extern int grsec_enable_signal;
59094 +extern int grsec_enable_audit_ptrace;
59095 +extern int grsec_enable_forkfail;
59096 +extern int grsec_enable_time;
59097 +extern int grsec_enable_rofs;
59098 +extern int grsec_enable_chroot_shmat;
59099 +extern int grsec_enable_chroot_mount;
59100 +extern int grsec_enable_chroot_double;
59101 +extern int grsec_enable_chroot_pivot;
59102 +extern int grsec_enable_chroot_chdir;
59103 +extern int grsec_enable_chroot_chmod;
59104 +extern int grsec_enable_chroot_mknod;
59105 +extern int grsec_enable_chroot_fchdir;
59106 +extern int grsec_enable_chroot_nice;
59107 +extern int grsec_enable_chroot_execlog;
59108 +extern int grsec_enable_chroot_caps;
59109 +extern int grsec_enable_chroot_sysctl;
59110 +extern int grsec_enable_chroot_unix;
59111 +extern int grsec_enable_tpe;
59112 +extern int grsec_tpe_gid;
59113 +extern int grsec_enable_tpe_all;
59114 +extern int grsec_enable_tpe_invert;
59115 +extern int grsec_enable_socket_all;
59116 +extern int grsec_socket_all_gid;
59117 +extern int grsec_enable_socket_client;
59118 +extern int grsec_socket_client_gid;
59119 +extern int grsec_enable_socket_server;
59120 +extern int grsec_socket_server_gid;
59121 +extern int grsec_audit_gid;
59122 +extern int grsec_enable_group;
59123 +extern int grsec_enable_audit_textrel;
59124 +extern int grsec_enable_log_rwxmaps;
59125 +extern int grsec_enable_mount;
59126 +extern int grsec_enable_chdir;
59127 +extern int grsec_resource_logging;
59128 +extern int grsec_enable_blackhole;
59129 +extern int grsec_lastack_retries;
59130 +extern int grsec_enable_brute;
59131 +extern int grsec_lock;
59132 +
59133 +extern spinlock_t grsec_alert_lock;
59134 +extern unsigned long grsec_alert_wtime;
59135 +extern unsigned long grsec_alert_fyet;
59136 +
59137 +extern spinlock_t grsec_audit_lock;
59138 +
59139 +extern rwlock_t grsec_exec_file_lock;
59140 +
59141 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
59142 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
59143 + (tsk)->exec_file->f_vfsmnt) : "/")
59144 +
59145 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
59146 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
59147 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59148 +
59149 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
59150 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
59151 + (tsk)->exec_file->f_vfsmnt) : "/")
59152 +
59153 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
59154 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
59155 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59156 +
59157 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
59158 +
59159 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
59160 +
59161 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
59162 + (task)->pid, (cred)->uid, \
59163 + (cred)->euid, (cred)->gid, (cred)->egid, \
59164 + gr_parent_task_fullpath(task), \
59165 + (task)->real_parent->comm, (task)->real_parent->pid, \
59166 + (pcred)->uid, (pcred)->euid, \
59167 + (pcred)->gid, (pcred)->egid
59168 +
59169 +#define GR_CHROOT_CAPS {{ \
59170 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
59171 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
59172 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
59173 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
59174 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
59175 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
59176 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
59177 +
59178 +#define security_learn(normal_msg,args...) \
59179 +({ \
59180 + read_lock(&grsec_exec_file_lock); \
59181 + gr_add_learn_entry(normal_msg "\n", ## args); \
59182 + read_unlock(&grsec_exec_file_lock); \
59183 +})
59184 +
59185 +enum {
59186 + GR_DO_AUDIT,
59187 + GR_DONT_AUDIT,
59188 + /* used for non-audit messages that we shouldn't kill the task on */
59189 + GR_DONT_AUDIT_GOOD
59190 +};
59191 +
59192 +enum {
59193 + GR_TTYSNIFF,
59194 + GR_RBAC,
59195 + GR_RBAC_STR,
59196 + GR_STR_RBAC,
59197 + GR_RBAC_MODE2,
59198 + GR_RBAC_MODE3,
59199 + GR_FILENAME,
59200 + GR_SYSCTL_HIDDEN,
59201 + GR_NOARGS,
59202 + GR_ONE_INT,
59203 + GR_ONE_INT_TWO_STR,
59204 + GR_ONE_STR,
59205 + GR_STR_INT,
59206 + GR_TWO_STR_INT,
59207 + GR_TWO_INT,
59208 + GR_TWO_U64,
59209 + GR_THREE_INT,
59210 + GR_FIVE_INT_TWO_STR,
59211 + GR_TWO_STR,
59212 + GR_THREE_STR,
59213 + GR_FOUR_STR,
59214 + GR_STR_FILENAME,
59215 + GR_FILENAME_STR,
59216 + GR_FILENAME_TWO_INT,
59217 + GR_FILENAME_TWO_INT_STR,
59218 + GR_TEXTREL,
59219 + GR_PTRACE,
59220 + GR_RESOURCE,
59221 + GR_CAP,
59222 + GR_SIG,
59223 + GR_SIG2,
59224 + GR_CRASH1,
59225 + GR_CRASH2,
59226 + GR_PSACCT,
59227 + GR_RWXMAP
59228 +};
59229 +
59230 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
59231 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
59232 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
59233 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
59234 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
59235 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
59236 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
59237 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
59238 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
59239 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
59240 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
59241 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
59242 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
59243 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
59244 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
59245 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
59246 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
59247 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
59248 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
59249 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
59250 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
59251 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
59252 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
59253 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
59254 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
59255 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
59256 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
59257 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
59258 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
59259 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
59260 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
59261 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
59262 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
59263 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
59264 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
59265 +
59266 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
59267 +
59268 +#endif
59269 +
59270 +#endif
59271 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
59272 new file mode 100644
59273 index 0000000..ae576a1
59274 --- /dev/null
59275 +++ b/include/linux/grmsg.h
59276 @@ -0,0 +1,109 @@
59277 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
59278 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
59279 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
59280 +#define GR_STOPMOD_MSG "denied modification of module state by "
59281 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
59282 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
59283 +#define GR_IOPERM_MSG "denied use of ioperm() by "
59284 +#define GR_IOPL_MSG "denied use of iopl() by "
59285 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
59286 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
59287 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
59288 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
59289 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
59290 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
59291 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
59292 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
59293 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
59294 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
59295 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
59296 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
59297 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
59298 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
59299 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
59300 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
59301 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
59302 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
59303 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
59304 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
59305 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
59306 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
59307 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
59308 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
59309 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
59310 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
59311 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
59312 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
59313 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
59314 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
59315 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
59316 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
59317 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
59318 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
59319 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
59320 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
59321 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
59322 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
59323 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
59324 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
59325 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
59326 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
59327 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
59328 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
59329 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
59330 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
59331 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
59332 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
59333 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
59334 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
59335 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
59336 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
59337 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
59338 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
59339 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
59340 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
59341 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
59342 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
59343 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
59344 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
59345 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
59346 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
59347 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
59348 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
59349 +#define GR_NICE_CHROOT_MSG "denied priority change by "
59350 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
59351 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
59352 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
59353 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
59354 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
59355 +#define GR_TIME_MSG "time set by "
59356 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
59357 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
59358 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
59359 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
59360 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
59361 +#define GR_BIND_MSG "denied bind() by "
59362 +#define GR_CONNECT_MSG "denied connect() by "
59363 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
59364 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
59365 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
59366 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
59367 +#define GR_CAP_ACL_MSG "use of %s denied for "
59368 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
59369 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
59370 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
59371 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
59372 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
59373 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
59374 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
59375 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
59376 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
59377 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
59378 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
59379 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
59380 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
59381 +#define GR_VM86_MSG "denied use of vm86 by "
59382 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
59383 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
59384 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
59385 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
59386 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
59387 new file mode 100644
59388 index 0000000..2ccf677
59389 --- /dev/null
59390 +++ b/include/linux/grsecurity.h
59391 @@ -0,0 +1,229 @@
59392 +#ifndef GR_SECURITY_H
59393 +#define GR_SECURITY_H
59394 +#include <linux/fs.h>
59395 +#include <linux/fs_struct.h>
59396 +#include <linux/binfmts.h>
59397 +#include <linux/gracl.h>
59398 +
59399 +/* notify of brain-dead configs */
59400 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59401 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59402 +#endif
59403 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59404 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59405 +#endif
59406 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59407 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59408 +#endif
59409 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59410 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
59411 +#endif
59412 +
59413 +#include <linux/compat.h>
59414 +
59415 +struct user_arg_ptr {
59416 +#ifdef CONFIG_COMPAT
59417 + bool is_compat;
59418 +#endif
59419 + union {
59420 + const char __user *const __user *native;
59421 +#ifdef CONFIG_COMPAT
59422 + compat_uptr_t __user *compat;
59423 +#endif
59424 + } ptr;
59425 +};
59426 +
59427 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59428 +void gr_handle_brute_check(void);
59429 +void gr_handle_kernel_exploit(void);
59430 +int gr_process_user_ban(void);
59431 +
59432 +char gr_roletype_to_char(void);
59433 +
59434 +int gr_acl_enable_at_secure(void);
59435 +
59436 +int gr_check_user_change(int real, int effective, int fs);
59437 +int gr_check_group_change(int real, int effective, int fs);
59438 +
59439 +void gr_del_task_from_ip_table(struct task_struct *p);
59440 +
59441 +int gr_pid_is_chrooted(struct task_struct *p);
59442 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59443 +int gr_handle_chroot_nice(void);
59444 +int gr_handle_chroot_sysctl(const int op);
59445 +int gr_handle_chroot_setpriority(struct task_struct *p,
59446 + const int niceval);
59447 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59448 +int gr_handle_chroot_chroot(const struct dentry *dentry,
59449 + const struct vfsmount *mnt);
59450 +void gr_handle_chroot_chdir(struct path *path);
59451 +int gr_handle_chroot_chmod(const struct dentry *dentry,
59452 + const struct vfsmount *mnt, const int mode);
59453 +int gr_handle_chroot_mknod(const struct dentry *dentry,
59454 + const struct vfsmount *mnt, const int mode);
59455 +int gr_handle_chroot_mount(const struct dentry *dentry,
59456 + const struct vfsmount *mnt,
59457 + const char *dev_name);
59458 +int gr_handle_chroot_pivot(void);
59459 +int gr_handle_chroot_unix(const pid_t pid);
59460 +
59461 +int gr_handle_rawio(const struct inode *inode);
59462 +
59463 +void gr_handle_ioperm(void);
59464 +void gr_handle_iopl(void);
59465 +
59466 +umode_t gr_acl_umask(void);
59467 +
59468 +int gr_tpe_allow(const struct file *file);
59469 +
59470 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59471 +void gr_clear_chroot_entries(struct task_struct *task);
59472 +
59473 +void gr_log_forkfail(const int retval);
59474 +void gr_log_timechange(void);
59475 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59476 +void gr_log_chdir(const struct dentry *dentry,
59477 + const struct vfsmount *mnt);
59478 +void gr_log_chroot_exec(const struct dentry *dentry,
59479 + const struct vfsmount *mnt);
59480 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59481 +void gr_log_remount(const char *devname, const int retval);
59482 +void gr_log_unmount(const char *devname, const int retval);
59483 +void gr_log_mount(const char *from, const char *to, const int retval);
59484 +void gr_log_textrel(struct vm_area_struct *vma);
59485 +void gr_log_rwxmmap(struct file *file);
59486 +void gr_log_rwxmprotect(struct file *file);
59487 +
59488 +int gr_handle_follow_link(const struct inode *parent,
59489 + const struct inode *inode,
59490 + const struct dentry *dentry,
59491 + const struct vfsmount *mnt);
59492 +int gr_handle_fifo(const struct dentry *dentry,
59493 + const struct vfsmount *mnt,
59494 + const struct dentry *dir, const int flag,
59495 + const int acc_mode);
59496 +int gr_handle_hardlink(const struct dentry *dentry,
59497 + const struct vfsmount *mnt,
59498 + struct inode *inode,
59499 + const int mode, const char *to);
59500 +
59501 +int gr_is_capable(const int cap);
59502 +int gr_is_capable_nolog(const int cap);
59503 +void gr_learn_resource(const struct task_struct *task, const int limit,
59504 + const unsigned long wanted, const int gt);
59505 +void gr_copy_label(struct task_struct *tsk);
59506 +void gr_handle_crash(struct task_struct *task, const int sig);
59507 +int gr_handle_signal(const struct task_struct *p, const int sig);
59508 +int gr_check_crash_uid(const uid_t uid);
59509 +int gr_check_protected_task(const struct task_struct *task);
59510 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59511 +int gr_acl_handle_mmap(const struct file *file,
59512 + const unsigned long prot);
59513 +int gr_acl_handle_mprotect(const struct file *file,
59514 + const unsigned long prot);
59515 +int gr_check_hidden_task(const struct task_struct *tsk);
59516 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59517 + const struct vfsmount *mnt);
59518 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
59519 + const struct vfsmount *mnt);
59520 +__u32 gr_acl_handle_access(const struct dentry *dentry,
59521 + const struct vfsmount *mnt, const int fmode);
59522 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59523 + const struct vfsmount *mnt, umode_t *mode);
59524 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
59525 + const struct vfsmount *mnt);
59526 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59527 + const struct vfsmount *mnt);
59528 +int gr_handle_ptrace(struct task_struct *task, const long request);
59529 +int gr_handle_proc_ptrace(struct task_struct *task);
59530 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
59531 + const struct vfsmount *mnt);
59532 +int gr_check_crash_exec(const struct file *filp);
59533 +int gr_acl_is_enabled(void);
59534 +void gr_set_kernel_label(struct task_struct *task);
59535 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
59536 + const gid_t gid);
59537 +int gr_set_proc_label(const struct dentry *dentry,
59538 + const struct vfsmount *mnt,
59539 + const int unsafe_flags);
59540 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59541 + const struct vfsmount *mnt);
59542 +__u32 gr_acl_handle_open(const struct dentry *dentry,
59543 + const struct vfsmount *mnt, int acc_mode);
59544 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
59545 + const struct dentry *p_dentry,
59546 + const struct vfsmount *p_mnt,
59547 + int open_flags, int acc_mode, const int imode);
59548 +void gr_handle_create(const struct dentry *dentry,
59549 + const struct vfsmount *mnt);
59550 +void gr_handle_proc_create(const struct dentry *dentry,
59551 + const struct inode *inode);
59552 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59553 + const struct dentry *parent_dentry,
59554 + const struct vfsmount *parent_mnt,
59555 + const int mode);
59556 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59557 + const struct dentry *parent_dentry,
59558 + const struct vfsmount *parent_mnt);
59559 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59560 + const struct vfsmount *mnt);
59561 +void gr_handle_delete(const ino_t ino, const dev_t dev);
59562 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59563 + const struct vfsmount *mnt);
59564 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59565 + const struct dentry *parent_dentry,
59566 + const struct vfsmount *parent_mnt,
59567 + const char *from);
59568 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59569 + const struct dentry *parent_dentry,
59570 + const struct vfsmount *parent_mnt,
59571 + const struct dentry *old_dentry,
59572 + const struct vfsmount *old_mnt, const char *to);
59573 +int gr_acl_handle_rename(struct dentry *new_dentry,
59574 + struct dentry *parent_dentry,
59575 + const struct vfsmount *parent_mnt,
59576 + struct dentry *old_dentry,
59577 + struct inode *old_parent_inode,
59578 + struct vfsmount *old_mnt, const char *newname);
59579 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59580 + struct dentry *old_dentry,
59581 + struct dentry *new_dentry,
59582 + struct vfsmount *mnt, const __u8 replace);
59583 +__u32 gr_check_link(const struct dentry *new_dentry,
59584 + const struct dentry *parent_dentry,
59585 + const struct vfsmount *parent_mnt,
59586 + const struct dentry *old_dentry,
59587 + const struct vfsmount *old_mnt);
59588 +int gr_acl_handle_filldir(const struct file *file, const char *name,
59589 + const unsigned int namelen, const ino_t ino);
59590 +
59591 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
59592 + const struct vfsmount *mnt);
59593 +void gr_acl_handle_exit(void);
59594 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
59595 +int gr_acl_handle_procpidmem(const struct task_struct *task);
59596 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59597 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59598 +void gr_audit_ptrace(struct task_struct *task);
59599 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59600 +
59601 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
59602 +
59603 +#ifdef CONFIG_GRKERNSEC
59604 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59605 +void gr_handle_vm86(void);
59606 +void gr_handle_mem_readwrite(u64 from, u64 to);
59607 +
59608 +void gr_log_badprocpid(const char *entry);
59609 +
59610 +extern int grsec_enable_dmesg;
59611 +extern int grsec_disable_privio;
59612 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59613 +extern int grsec_enable_chroot_findtask;
59614 +#endif
59615 +#ifdef CONFIG_GRKERNSEC_SETXID
59616 +extern int grsec_enable_setxid;
59617 +#endif
59618 +#endif
59619 +
59620 +#endif
59621 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
59622 new file mode 100644
59623 index 0000000..e7ffaaf
59624 --- /dev/null
59625 +++ b/include/linux/grsock.h
59626 @@ -0,0 +1,19 @@
59627 +#ifndef __GRSOCK_H
59628 +#define __GRSOCK_H
59629 +
59630 +extern void gr_attach_curr_ip(const struct sock *sk);
59631 +extern int gr_handle_sock_all(const int family, const int type,
59632 + const int protocol);
59633 +extern int gr_handle_sock_server(const struct sockaddr *sck);
59634 +extern int gr_handle_sock_server_other(const struct sock *sck);
59635 +extern int gr_handle_sock_client(const struct sockaddr *sck);
59636 +extern int gr_search_connect(struct socket * sock,
59637 + struct sockaddr_in * addr);
59638 +extern int gr_search_bind(struct socket * sock,
59639 + struct sockaddr_in * addr);
59640 +extern int gr_search_listen(struct socket * sock);
59641 +extern int gr_search_accept(struct socket * sock);
59642 +extern int gr_search_socket(const int domain, const int type,
59643 + const int protocol);
59644 +
59645 +#endif
59646 diff --git a/include/linux/hid.h b/include/linux/hid.h
59647 index c235e4e..f0cf7a0 100644
59648 --- a/include/linux/hid.h
59649 +++ b/include/linux/hid.h
59650 @@ -679,7 +679,7 @@ struct hid_ll_driver {
59651 unsigned int code, int value);
59652
59653 int (*parse)(struct hid_device *hdev);
59654 -};
59655 +} __no_const;
59656
59657 #define PM_HINT_FULLON 1<<5
59658 #define PM_HINT_NORMAL 1<<1
59659 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
59660 index 3a93f73..b19d0b3 100644
59661 --- a/include/linux/highmem.h
59662 +++ b/include/linux/highmem.h
59663 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
59664 kunmap_atomic(kaddr, KM_USER0);
59665 }
59666
59667 +static inline void sanitize_highpage(struct page *page)
59668 +{
59669 + void *kaddr;
59670 + unsigned long flags;
59671 +
59672 + local_irq_save(flags);
59673 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
59674 + clear_page(kaddr);
59675 + kunmap_atomic(kaddr, KM_CLEARPAGE);
59676 + local_irq_restore(flags);
59677 +}
59678 +
59679 static inline void zero_user_segments(struct page *page,
59680 unsigned start1, unsigned end1,
59681 unsigned start2, unsigned end2)
59682 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
59683 index 07d103a..04ec65b 100644
59684 --- a/include/linux/i2c.h
59685 +++ b/include/linux/i2c.h
59686 @@ -364,6 +364,7 @@ struct i2c_algorithm {
59687 /* To determine what the adapter supports */
59688 u32 (*functionality) (struct i2c_adapter *);
59689 };
59690 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59691
59692 /*
59693 * i2c_adapter is the structure used to identify a physical i2c bus along
59694 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
59695 index a6deef4..c56a7f2 100644
59696 --- a/include/linux/i2o.h
59697 +++ b/include/linux/i2o.h
59698 @@ -564,7 +564,7 @@ struct i2o_controller {
59699 struct i2o_device *exec; /* Executive */
59700 #if BITS_PER_LONG == 64
59701 spinlock_t context_list_lock; /* lock for context_list */
59702 - atomic_t context_list_counter; /* needed for unique contexts */
59703 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59704 struct list_head context_list; /* list of context id's
59705 and pointers */
59706 #endif
59707 diff --git a/include/linux/init.h b/include/linux/init.h
59708 index 9146f39..885354d 100644
59709 --- a/include/linux/init.h
59710 +++ b/include/linux/init.h
59711 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
59712
59713 /* Each module must use one module_init(). */
59714 #define module_init(initfn) \
59715 - static inline initcall_t __inittest(void) \
59716 + static inline __used initcall_t __inittest(void) \
59717 { return initfn; } \
59718 int init_module(void) __attribute__((alias(#initfn)));
59719
59720 /* This is only required if you want to be unloadable. */
59721 #define module_exit(exitfn) \
59722 - static inline exitcall_t __exittest(void) \
59723 + static inline __used exitcall_t __exittest(void) \
59724 { return exitfn; } \
59725 void cleanup_module(void) __attribute__((alias(#exitfn)));
59726
59727 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
59728 index 32574ee..00d4ef1 100644
59729 --- a/include/linux/init_task.h
59730 +++ b/include/linux/init_task.h
59731 @@ -128,6 +128,12 @@ extern struct cred init_cred;
59732
59733 #define INIT_TASK_COMM "swapper"
59734
59735 +#ifdef CONFIG_X86
59736 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59737 +#else
59738 +#define INIT_TASK_THREAD_INFO
59739 +#endif
59740 +
59741 /*
59742 * INIT_TASK is used to set up the first task table, touch at
59743 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59744 @@ -166,6 +172,7 @@ extern struct cred init_cred;
59745 RCU_INIT_POINTER(.cred, &init_cred), \
59746 .comm = INIT_TASK_COMM, \
59747 .thread = INIT_THREAD, \
59748 + INIT_TASK_THREAD_INFO \
59749 .fs = &init_fs, \
59750 .files = &init_files, \
59751 .signal = &init_signals, \
59752 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
59753 index e6ca56d..8583707 100644
59754 --- a/include/linux/intel-iommu.h
59755 +++ b/include/linux/intel-iommu.h
59756 @@ -296,7 +296,7 @@ struct iommu_flush {
59757 u8 fm, u64 type);
59758 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59759 unsigned int size_order, u64 type);
59760 -};
59761 +} __no_const;
59762
59763 enum {
59764 SR_DMAR_FECTL_REG,
59765 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
59766 index a64b00e..464d8bc 100644
59767 --- a/include/linux/interrupt.h
59768 +++ b/include/linux/interrupt.h
59769 @@ -441,7 +441,7 @@ enum
59770 /* map softirq index to softirq name. update 'softirq_to_name' in
59771 * kernel/softirq.c when adding a new softirq.
59772 */
59773 -extern char *softirq_to_name[NR_SOFTIRQS];
59774 +extern const char * const softirq_to_name[NR_SOFTIRQS];
59775
59776 /* softirq mask and active fields moved to irq_cpustat_t in
59777 * asm/hardirq.h to get better cache usage. KAO
59778 @@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
59779
59780 struct softirq_action
59781 {
59782 - void (*action)(struct softirq_action *);
59783 + void (*action)(void);
59784 };
59785
59786 asmlinkage void do_softirq(void);
59787 asmlinkage void __do_softirq(void);
59788 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59789 +extern void open_softirq(int nr, void (*action)(void));
59790 extern void softirq_init(void);
59791 static inline void __raise_softirq_irqoff(unsigned int nr)
59792 {
59793 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
59794 index 3875719..4cd454c 100644
59795 --- a/include/linux/kallsyms.h
59796 +++ b/include/linux/kallsyms.h
59797 @@ -15,7 +15,8 @@
59798
59799 struct module;
59800
59801 -#ifdef CONFIG_KALLSYMS
59802 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59803 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59804 /* Lookup the address for a symbol. Returns 0 if not found. */
59805 unsigned long kallsyms_lookup_name(const char *name);
59806
59807 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
59808 /* Stupid that this does nothing, but I didn't create this mess. */
59809 #define __print_symbol(fmt, addr)
59810 #endif /*CONFIG_KALLSYMS*/
59811 +#else /* when included by kallsyms.c, vsnprintf.c, or
59812 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59813 +extern void __print_symbol(const char *fmt, unsigned long address);
59814 +extern int sprint_backtrace(char *buffer, unsigned long address);
59815 +extern int sprint_symbol(char *buffer, unsigned long address);
59816 +const char *kallsyms_lookup(unsigned long addr,
59817 + unsigned long *symbolsize,
59818 + unsigned long *offset,
59819 + char **modname, char *namebuf);
59820 +#endif
59821
59822 /* This macro allows us to keep printk typechecking */
59823 static __printf(1, 2)
59824 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
59825 index fa39183..40160be 100644
59826 --- a/include/linux/kgdb.h
59827 +++ b/include/linux/kgdb.h
59828 @@ -53,7 +53,7 @@ extern int kgdb_connected;
59829 extern int kgdb_io_module_registered;
59830
59831 extern atomic_t kgdb_setting_breakpoint;
59832 -extern atomic_t kgdb_cpu_doing_single_step;
59833 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59834
59835 extern struct task_struct *kgdb_usethread;
59836 extern struct task_struct *kgdb_contthread;
59837 @@ -251,7 +251,7 @@ struct kgdb_arch {
59838 void (*disable_hw_break)(struct pt_regs *regs);
59839 void (*remove_all_hw_break)(void);
59840 void (*correct_hw_break)(void);
59841 -};
59842 +} __do_const;
59843
59844 /**
59845 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59846 @@ -276,7 +276,7 @@ struct kgdb_io {
59847 void (*pre_exception) (void);
59848 void (*post_exception) (void);
59849 int is_console;
59850 -};
59851 +} __do_const;
59852
59853 extern struct kgdb_arch arch_kgdb_ops;
59854
59855 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
59856 index b16f653..eb908f4 100644
59857 --- a/include/linux/kmod.h
59858 +++ b/include/linux/kmod.h
59859 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
59860 * usually useless though. */
59861 extern __printf(2, 3)
59862 int __request_module(bool wait, const char *name, ...);
59863 +extern __printf(3, 4)
59864 +int ___request_module(bool wait, char *param_name, const char *name, ...);
59865 #define request_module(mod...) __request_module(true, mod)
59866 #define request_module_nowait(mod...) __request_module(false, mod)
59867 #define try_then_request_module(x, mod...) \
59868 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
59869 index d526231..086e89b 100644
59870 --- a/include/linux/kvm_host.h
59871 +++ b/include/linux/kvm_host.h
59872 @@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
59873 void vcpu_load(struct kvm_vcpu *vcpu);
59874 void vcpu_put(struct kvm_vcpu *vcpu);
59875
59876 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59877 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59878 struct module *module);
59879 void kvm_exit(void);
59880
59881 @@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
59882 struct kvm_guest_debug *dbg);
59883 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59884
59885 -int kvm_arch_init(void *opaque);
59886 +int kvm_arch_init(const void *opaque);
59887 void kvm_arch_exit(void);
59888
59889 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59890 diff --git a/include/linux/libata.h b/include/linux/libata.h
59891 index cafc09a..d7e7829 100644
59892 --- a/include/linux/libata.h
59893 +++ b/include/linux/libata.h
59894 @@ -909,7 +909,7 @@ struct ata_port_operations {
59895 * fields must be pointers.
59896 */
59897 const struct ata_port_operations *inherits;
59898 -};
59899 +} __do_const;
59900
59901 struct ata_port_info {
59902 unsigned long flags;
59903 diff --git a/include/linux/mca.h b/include/linux/mca.h
59904 index 3797270..7765ede 100644
59905 --- a/include/linux/mca.h
59906 +++ b/include/linux/mca.h
59907 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59908 int region);
59909 void * (*mca_transform_memory)(struct mca_device *,
59910 void *memory);
59911 -};
59912 +} __no_const;
59913
59914 struct mca_bus {
59915 u64 default_dma_mask;
59916 diff --git a/include/linux/memory.h b/include/linux/memory.h
59917 index 935699b..11042cc 100644
59918 --- a/include/linux/memory.h
59919 +++ b/include/linux/memory.h
59920 @@ -144,7 +144,7 @@ struct memory_accessor {
59921 size_t count);
59922 ssize_t (*write)(struct memory_accessor *, const char *buf,
59923 off_t offset, size_t count);
59924 -};
59925 +} __no_const;
59926
59927 /*
59928 * Kernel text modification mutex, used for code patching. Users of this lock
59929 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
59930 index 9970337..9444122 100644
59931 --- a/include/linux/mfd/abx500.h
59932 +++ b/include/linux/mfd/abx500.h
59933 @@ -188,6 +188,7 @@ struct abx500_ops {
59934 int (*event_registers_startup_state_get) (struct device *, u8 *);
59935 int (*startup_irq_enabled) (struct device *, unsigned int);
59936 };
59937 +typedef struct abx500_ops __no_const abx500_ops_no_const;
59938
59939 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59940 void abx500_remove_ops(struct device *dev);
59941 diff --git a/include/linux/mm.h b/include/linux/mm.h
59942 index 4baadd1..2e0b45e 100644
59943 --- a/include/linux/mm.h
59944 +++ b/include/linux/mm.h
59945 @@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
59946
59947 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59948 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59949 +
59950 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59951 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59952 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59953 +#else
59954 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59955 +#endif
59956 +
59957 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59958 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59959
59960 @@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
59961 int set_page_dirty_lock(struct page *page);
59962 int clear_page_dirty_for_io(struct page *page);
59963
59964 -/* Is the vma a continuation of the stack vma above it? */
59965 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59966 -{
59967 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59968 -}
59969 -
59970 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
59971 - unsigned long addr)
59972 -{
59973 - return (vma->vm_flags & VM_GROWSDOWN) &&
59974 - (vma->vm_start == addr) &&
59975 - !vma_growsdown(vma->vm_prev, addr);
59976 -}
59977 -
59978 -/* Is the vma a continuation of the stack vma below it? */
59979 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59980 -{
59981 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59982 -}
59983 -
59984 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
59985 - unsigned long addr)
59986 -{
59987 - return (vma->vm_flags & VM_GROWSUP) &&
59988 - (vma->vm_end == addr) &&
59989 - !vma_growsup(vma->vm_next, addr);
59990 -}
59991 -
59992 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59993 unsigned long old_addr, struct vm_area_struct *new_vma,
59994 unsigned long new_addr, unsigned long len);
59995 @@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
59996 }
59997 #endif
59998
59999 +#ifdef CONFIG_MMU
60000 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
60001 +#else
60002 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
60003 +{
60004 + return __pgprot(0);
60005 +}
60006 +#endif
60007 +
60008 int vma_wants_writenotify(struct vm_area_struct *vma);
60009
60010 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
60011 @@ -1419,6 +1407,7 @@ out:
60012 }
60013
60014 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
60015 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
60016
60017 extern unsigned long do_brk(unsigned long, unsigned long);
60018
60019 @@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
60020 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
60021 struct vm_area_struct **pprev);
60022
60023 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
60024 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
60025 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
60026 +
60027 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
60028 NULL if none. Assume start_addr < end_addr. */
60029 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
60030 @@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
60031 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
60032 }
60033
60034 -#ifdef CONFIG_MMU
60035 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
60036 -#else
60037 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
60038 -{
60039 - return __pgprot(0);
60040 -}
60041 -#endif
60042 -
60043 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
60044 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
60045 unsigned long pfn, unsigned long size, pgprot_t);
60046 @@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn);
60047 extern int sysctl_memory_failure_early_kill;
60048 extern int sysctl_memory_failure_recovery;
60049 extern void shake_page(struct page *p, int access);
60050 -extern atomic_long_t mce_bad_pages;
60051 +extern atomic_long_unchecked_t mce_bad_pages;
60052 extern int soft_offline_page(struct page *page, int flags);
60053
60054 extern void dump_page(struct page *page);
60055 @@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
60056 unsigned int pages_per_huge_page);
60057 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
60058
60059 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
60060 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
60061 +#else
60062 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
60063 +#endif
60064 +
60065 #endif /* __KERNEL__ */
60066 #endif /* _LINUX_MM_H */
60067 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
60068 index 5b42f1b..759e4b4 100644
60069 --- a/include/linux/mm_types.h
60070 +++ b/include/linux/mm_types.h
60071 @@ -253,6 +253,8 @@ struct vm_area_struct {
60072 #ifdef CONFIG_NUMA
60073 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
60074 #endif
60075 +
60076 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
60077 };
60078
60079 struct core_thread {
60080 @@ -389,6 +391,24 @@ struct mm_struct {
60081 #ifdef CONFIG_CPUMASK_OFFSTACK
60082 struct cpumask cpumask_allocation;
60083 #endif
60084 +
60085 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60086 + unsigned long pax_flags;
60087 +#endif
60088 +
60089 +#ifdef CONFIG_PAX_DLRESOLVE
60090 + unsigned long call_dl_resolve;
60091 +#endif
60092 +
60093 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
60094 + unsigned long call_syscall;
60095 +#endif
60096 +
60097 +#ifdef CONFIG_PAX_ASLR
60098 + unsigned long delta_mmap; /* randomized offset */
60099 + unsigned long delta_stack; /* randomized offset */
60100 +#endif
60101 +
60102 };
60103
60104 static inline void mm_init_cpumask(struct mm_struct *mm)
60105 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
60106 index 1d1b1e1..2a13c78 100644
60107 --- a/include/linux/mmu_notifier.h
60108 +++ b/include/linux/mmu_notifier.h
60109 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
60110 */
60111 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
60112 ({ \
60113 - pte_t __pte; \
60114 + pte_t ___pte; \
60115 struct vm_area_struct *___vma = __vma; \
60116 unsigned long ___address = __address; \
60117 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
60118 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
60119 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
60120 - __pte; \
60121 + ___pte; \
60122 })
60123
60124 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
60125 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
60126 index 188cb2f..d78409b 100644
60127 --- a/include/linux/mmzone.h
60128 +++ b/include/linux/mmzone.h
60129 @@ -369,7 +369,7 @@ struct zone {
60130 unsigned long flags; /* zone flags, see below */
60131
60132 /* Zone statistics */
60133 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60134 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60135
60136 /*
60137 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
60138 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
60139 index 468819c..17b9db3 100644
60140 --- a/include/linux/mod_devicetable.h
60141 +++ b/include/linux/mod_devicetable.h
60142 @@ -12,7 +12,7 @@
60143 typedef unsigned long kernel_ulong_t;
60144 #endif
60145
60146 -#define PCI_ANY_ID (~0)
60147 +#define PCI_ANY_ID ((__u16)~0)
60148
60149 struct pci_device_id {
60150 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
60151 @@ -131,7 +131,7 @@ struct usb_device_id {
60152 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
60153 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
60154
60155 -#define HID_ANY_ID (~0)
60156 +#define HID_ANY_ID (~0U)
60157
60158 struct hid_device_id {
60159 __u16 bus;
60160 diff --git a/include/linux/module.h b/include/linux/module.h
60161 index 3cb7839..511cb87 100644
60162 --- a/include/linux/module.h
60163 +++ b/include/linux/module.h
60164 @@ -17,6 +17,7 @@
60165 #include <linux/moduleparam.h>
60166 #include <linux/tracepoint.h>
60167 #include <linux/export.h>
60168 +#include <linux/fs.h>
60169
60170 #include <linux/percpu.h>
60171 #include <asm/module.h>
60172 @@ -261,19 +262,16 @@ struct module
60173 int (*init)(void);
60174
60175 /* If this is non-NULL, vfree after init() returns */
60176 - void *module_init;
60177 + void *module_init_rx, *module_init_rw;
60178
60179 /* Here is the actual code + data, vfree'd on unload. */
60180 - void *module_core;
60181 + void *module_core_rx, *module_core_rw;
60182
60183 /* Here are the sizes of the init and core sections */
60184 - unsigned int init_size, core_size;
60185 + unsigned int init_size_rw, core_size_rw;
60186
60187 /* The size of the executable code in each section. */
60188 - unsigned int init_text_size, core_text_size;
60189 -
60190 - /* Size of RO sections of the module (text+rodata) */
60191 - unsigned int init_ro_size, core_ro_size;
60192 + unsigned int init_size_rx, core_size_rx;
60193
60194 /* Arch-specific module values */
60195 struct mod_arch_specific arch;
60196 @@ -329,6 +327,10 @@ struct module
60197 #ifdef CONFIG_EVENT_TRACING
60198 struct ftrace_event_call **trace_events;
60199 unsigned int num_trace_events;
60200 + struct file_operations trace_id;
60201 + struct file_operations trace_enable;
60202 + struct file_operations trace_format;
60203 + struct file_operations trace_filter;
60204 #endif
60205 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
60206 unsigned int num_ftrace_callsites;
60207 @@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr);
60208 bool is_module_percpu_address(unsigned long addr);
60209 bool is_module_text_address(unsigned long addr);
60210
60211 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
60212 +{
60213 +
60214 +#ifdef CONFIG_PAX_KERNEXEC
60215 + if (ktla_ktva(addr) >= (unsigned long)start &&
60216 + ktla_ktva(addr) < (unsigned long)start + size)
60217 + return 1;
60218 +#endif
60219 +
60220 + return ((void *)addr >= start && (void *)addr < start + size);
60221 +}
60222 +
60223 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
60224 +{
60225 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
60226 +}
60227 +
60228 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
60229 +{
60230 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
60231 +}
60232 +
60233 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
60234 +{
60235 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
60236 +}
60237 +
60238 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
60239 +{
60240 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
60241 +}
60242 +
60243 static inline int within_module_core(unsigned long addr, struct module *mod)
60244 {
60245 - return (unsigned long)mod->module_core <= addr &&
60246 - addr < (unsigned long)mod->module_core + mod->core_size;
60247 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
60248 }
60249
60250 static inline int within_module_init(unsigned long addr, struct module *mod)
60251 {
60252 - return (unsigned long)mod->module_init <= addr &&
60253 - addr < (unsigned long)mod->module_init + mod->init_size;
60254 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
60255 }
60256
60257 /* Search for module by name: must hold module_mutex. */
60258 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
60259 index b2be02e..6a9fdb1 100644
60260 --- a/include/linux/moduleloader.h
60261 +++ b/include/linux/moduleloader.h
60262 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
60263 sections. Returns NULL on failure. */
60264 void *module_alloc(unsigned long size);
60265
60266 +#ifdef CONFIG_PAX_KERNEXEC
60267 +void *module_alloc_exec(unsigned long size);
60268 +#else
60269 +#define module_alloc_exec(x) module_alloc(x)
60270 +#endif
60271 +
60272 /* Free memory returned from module_alloc. */
60273 void module_free(struct module *mod, void *module_region);
60274
60275 +#ifdef CONFIG_PAX_KERNEXEC
60276 +void module_free_exec(struct module *mod, void *module_region);
60277 +#else
60278 +#define module_free_exec(x, y) module_free((x), (y))
60279 +#endif
60280 +
60281 /* Apply the given relocation to the (simplified) ELF. Return -error
60282 or 0. */
60283 int apply_relocate(Elf_Shdr *sechdrs,
60284 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
60285 index 7939f63..ec6df57 100644
60286 --- a/include/linux/moduleparam.h
60287 +++ b/include/linux/moduleparam.h
60288 @@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
60289 * @len is usually just sizeof(string).
60290 */
60291 #define module_param_string(name, string, len, perm) \
60292 - static const struct kparam_string __param_string_##name \
60293 + static const struct kparam_string __param_string_##name __used \
60294 = { len, string }; \
60295 __module_param_call(MODULE_PARAM_PREFIX, name, \
60296 &param_ops_string, \
60297 @@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
60298 * module_param_named() for why this might be necessary.
60299 */
60300 #define module_param_array_named(name, array, type, nump, perm) \
60301 - static const struct kparam_array __param_arr_##name \
60302 + static const struct kparam_array __param_arr_##name __used \
60303 = { .max = ARRAY_SIZE(array), .num = nump, \
60304 .ops = &param_ops_##type, \
60305 .elemsize = sizeof(array[0]), .elem = array }; \
60306 diff --git a/include/linux/namei.h b/include/linux/namei.h
60307 index ffc0213..2c1f2cb 100644
60308 --- a/include/linux/namei.h
60309 +++ b/include/linux/namei.h
60310 @@ -24,7 +24,7 @@ struct nameidata {
60311 unsigned seq;
60312 int last_type;
60313 unsigned depth;
60314 - char *saved_names[MAX_NESTED_LINKS + 1];
60315 + const char *saved_names[MAX_NESTED_LINKS + 1];
60316
60317 /* Intent data */
60318 union {
60319 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
60320 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
60321 extern void unlock_rename(struct dentry *, struct dentry *);
60322
60323 -static inline void nd_set_link(struct nameidata *nd, char *path)
60324 +static inline void nd_set_link(struct nameidata *nd, const char *path)
60325 {
60326 nd->saved_names[nd->depth] = path;
60327 }
60328
60329 -static inline char *nd_get_link(struct nameidata *nd)
60330 +static inline const char *nd_get_link(const struct nameidata *nd)
60331 {
60332 return nd->saved_names[nd->depth];
60333 }
60334 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
60335 index a82ad4d..90d15b7 100644
60336 --- a/include/linux/netdevice.h
60337 +++ b/include/linux/netdevice.h
60338 @@ -949,6 +949,7 @@ struct net_device_ops {
60339 int (*ndo_set_features)(struct net_device *dev,
60340 u32 features);
60341 };
60342 +typedef struct net_device_ops __no_const net_device_ops_no_const;
60343
60344 /*
60345 * The DEVICE structure.
60346 @@ -1088,7 +1089,7 @@ struct net_device {
60347 int iflink;
60348
60349 struct net_device_stats stats;
60350 - atomic_long_t rx_dropped; /* dropped packets by core network
60351 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
60352 * Do not use this in drivers.
60353 */
60354
60355 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
60356 new file mode 100644
60357 index 0000000..33f4af8
60358 --- /dev/null
60359 +++ b/include/linux/netfilter/xt_gradm.h
60360 @@ -0,0 +1,9 @@
60361 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
60362 +#define _LINUX_NETFILTER_XT_GRADM_H 1
60363 +
60364 +struct xt_gradm_mtinfo {
60365 + __u16 flags;
60366 + __u16 invflags;
60367 +};
60368 +
60369 +#endif
60370 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
60371 index c65a18a..0c05f3a 100644
60372 --- a/include/linux/of_pdt.h
60373 +++ b/include/linux/of_pdt.h
60374 @@ -32,7 +32,7 @@ struct of_pdt_ops {
60375
60376 /* return 0 on success; fill in 'len' with number of bytes in path */
60377 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
60378 -};
60379 +} __no_const;
60380
60381 extern void *prom_early_alloc(unsigned long size);
60382
60383 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
60384 index a4c5624..79d6d88 100644
60385 --- a/include/linux/oprofile.h
60386 +++ b/include/linux/oprofile.h
60387 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
60388 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
60389 char const * name, ulong * val);
60390
60391 -/** Create a file for read-only access to an atomic_t. */
60392 +/** Create a file for read-only access to an atomic_unchecked_t. */
60393 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
60394 - char const * name, atomic_t * val);
60395 + char const * name, atomic_unchecked_t * val);
60396
60397 /** create a directory */
60398 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60399 diff --git a/include/linux/padata.h b/include/linux/padata.h
60400 index 4633b2f..988bc08 100644
60401 --- a/include/linux/padata.h
60402 +++ b/include/linux/padata.h
60403 @@ -129,7 +129,7 @@ struct parallel_data {
60404 struct padata_instance *pinst;
60405 struct padata_parallel_queue __percpu *pqueue;
60406 struct padata_serial_queue __percpu *squeue;
60407 - atomic_t seq_nr;
60408 + atomic_unchecked_t seq_nr;
60409 atomic_t reorder_objects;
60410 atomic_t refcnt;
60411 unsigned int max_seq_nr;
60412 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
60413 index b1f8912..c955bff 100644
60414 --- a/include/linux/perf_event.h
60415 +++ b/include/linux/perf_event.h
60416 @@ -748,8 +748,8 @@ struct perf_event {
60417
60418 enum perf_event_active_state state;
60419 unsigned int attach_state;
60420 - local64_t count;
60421 - atomic64_t child_count;
60422 + local64_t count; /* PaX: fix it one day */
60423 + atomic64_unchecked_t child_count;
60424
60425 /*
60426 * These are the total time in nanoseconds that the event
60427 @@ -800,8 +800,8 @@ struct perf_event {
60428 * These accumulate total time (in nanoseconds) that children
60429 * events have been enabled and running, respectively.
60430 */
60431 - atomic64_t child_total_time_enabled;
60432 - atomic64_t child_total_time_running;
60433 + atomic64_unchecked_t child_total_time_enabled;
60434 + atomic64_unchecked_t child_total_time_running;
60435
60436 /*
60437 * Protect attach/detach and child_list:
60438 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
60439 index 77257c9..51d473a 100644
60440 --- a/include/linux/pipe_fs_i.h
60441 +++ b/include/linux/pipe_fs_i.h
60442 @@ -46,9 +46,9 @@ struct pipe_buffer {
60443 struct pipe_inode_info {
60444 wait_queue_head_t wait;
60445 unsigned int nrbufs, curbuf, buffers;
60446 - unsigned int readers;
60447 - unsigned int writers;
60448 - unsigned int waiting_writers;
60449 + atomic_t readers;
60450 + atomic_t writers;
60451 + atomic_t waiting_writers;
60452 unsigned int r_counter;
60453 unsigned int w_counter;
60454 struct page *tmp_page;
60455 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
60456 index d3085e7..fd01052 100644
60457 --- a/include/linux/pm_runtime.h
60458 +++ b/include/linux/pm_runtime.h
60459 @@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
60460
60461 static inline void pm_runtime_mark_last_busy(struct device *dev)
60462 {
60463 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
60464 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
60465 }
60466
60467 #else /* !CONFIG_PM_RUNTIME */
60468 diff --git a/include/linux/poison.h b/include/linux/poison.h
60469 index 79159de..f1233a9 100644
60470 --- a/include/linux/poison.h
60471 +++ b/include/linux/poison.h
60472 @@ -19,8 +19,8 @@
60473 * under normal circumstances, used to verify that nobody uses
60474 * non-initialized list entries.
60475 */
60476 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60477 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60478 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60479 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60480
60481 /********** include/linux/timer.h **********/
60482 /*
60483 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
60484 index 58969b2..ead129b 100644
60485 --- a/include/linux/preempt.h
60486 +++ b/include/linux/preempt.h
60487 @@ -123,7 +123,7 @@ struct preempt_ops {
60488 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60489 void (*sched_out)(struct preempt_notifier *notifier,
60490 struct task_struct *next);
60491 -};
60492 +} __no_const;
60493
60494 /**
60495 * preempt_notifier - key for installing preemption notifiers
60496 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
60497 index 643b96c..ef55a9c 100644
60498 --- a/include/linux/proc_fs.h
60499 +++ b/include/linux/proc_fs.h
60500 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
60501 return proc_create_data(name, mode, parent, proc_fops, NULL);
60502 }
60503
60504 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60505 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60506 +{
60507 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60508 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60509 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60510 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60511 +#else
60512 + return proc_create_data(name, mode, parent, proc_fops, NULL);
60513 +#endif
60514 +}
60515 +
60516 +
60517 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60518 mode_t mode, struct proc_dir_entry *base,
60519 read_proc_t *read_proc, void * data)
60520 @@ -258,7 +271,7 @@ union proc_op {
60521 int (*proc_show)(struct seq_file *m,
60522 struct pid_namespace *ns, struct pid *pid,
60523 struct task_struct *task);
60524 -};
60525 +} __no_const;
60526
60527 struct ctl_table_header;
60528 struct ctl_table;
60529 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
60530 index 800f113..e9ee2e3 100644
60531 --- a/include/linux/ptrace.h
60532 +++ b/include/linux/ptrace.h
60533 @@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child);
60534 extern void exit_ptrace(struct task_struct *tracer);
60535 #define PTRACE_MODE_READ 1
60536 #define PTRACE_MODE_ATTACH 2
60537 -/* Returns 0 on success, -errno on denial. */
60538 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60539 /* Returns true on success, false on denial. */
60540 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60541 +/* Returns true on success, false on denial. */
60542 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60543 +/* Returns true on success, false on denial. */
60544 +extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
60545
60546 static inline int ptrace_reparented(struct task_struct *child)
60547 {
60548 diff --git a/include/linux/random.h b/include/linux/random.h
60549 index 8f74538..02a1012 100644
60550 --- a/include/linux/random.h
60551 +++ b/include/linux/random.h
60552 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
60553
60554 u32 prandom32(struct rnd_state *);
60555
60556 +static inline unsigned long pax_get_random_long(void)
60557 +{
60558 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60559 +}
60560 +
60561 /*
60562 * Handle minimum values for seeds
60563 */
60564 static inline u32 __seed(u32 x, u32 m)
60565 {
60566 - return (x < m) ? x + m : x;
60567 + return (x <= m) ? x + m + 1 : x;
60568 }
60569
60570 /**
60571 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
60572 index e0879a7..a12f962 100644
60573 --- a/include/linux/reboot.h
60574 +++ b/include/linux/reboot.h
60575 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
60576 * Architecture-specific implementations of sys_reboot commands.
60577 */
60578
60579 -extern void machine_restart(char *cmd);
60580 -extern void machine_halt(void);
60581 -extern void machine_power_off(void);
60582 +extern void machine_restart(char *cmd) __noreturn;
60583 +extern void machine_halt(void) __noreturn;
60584 +extern void machine_power_off(void) __noreturn;
60585
60586 extern void machine_shutdown(void);
60587 struct pt_regs;
60588 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
60589 */
60590
60591 extern void kernel_restart_prepare(char *cmd);
60592 -extern void kernel_restart(char *cmd);
60593 -extern void kernel_halt(void);
60594 -extern void kernel_power_off(void);
60595 +extern void kernel_restart(char *cmd) __noreturn;
60596 +extern void kernel_halt(void) __noreturn;
60597 +extern void kernel_power_off(void) __noreturn;
60598
60599 extern int C_A_D; /* for sysctl */
60600 void ctrl_alt_del(void);
60601 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60602 * Emergency restart, callable from an interrupt handler.
60603 */
60604
60605 -extern void emergency_restart(void);
60606 +extern void emergency_restart(void) __noreturn;
60607 #include <asm/emergency-restart.h>
60608
60609 #endif
60610 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
60611 index 96d465f..b084e05 100644
60612 --- a/include/linux/reiserfs_fs.h
60613 +++ b/include/linux/reiserfs_fs.h
60614 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
60615 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60616
60617 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60618 -#define get_generation(s) atomic_read (&fs_generation(s))
60619 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60620 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60621 #define __fs_changed(gen,s) (gen != get_generation (s))
60622 #define fs_changed(gen,s) \
60623 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
60624 index 52c83b6..18ed7eb 100644
60625 --- a/include/linux/reiserfs_fs_sb.h
60626 +++ b/include/linux/reiserfs_fs_sb.h
60627 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60628 /* Comment? -Hans */
60629 wait_queue_head_t s_wait;
60630 /* To be obsoleted soon by per buffer seals.. -Hans */
60631 - atomic_t s_generation_counter; // increased by one every time the
60632 + atomic_unchecked_t s_generation_counter; // increased by one every time the
60633 // tree gets re-balanced
60634 unsigned long s_properties; /* File system properties. Currently holds
60635 on-disk FS format */
60636 diff --git a/include/linux/relay.h b/include/linux/relay.h
60637 index 14a86bc..17d0700 100644
60638 --- a/include/linux/relay.h
60639 +++ b/include/linux/relay.h
60640 @@ -159,7 +159,7 @@ struct rchan_callbacks
60641 * The callback should return 0 if successful, negative if not.
60642 */
60643 int (*remove_buf_file)(struct dentry *dentry);
60644 -};
60645 +} __no_const;
60646
60647 /*
60648 * CONFIG_RELAY kernel API, kernel/relay.c
60649 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
60650 index c6c6084..5bf1212 100644
60651 --- a/include/linux/rfkill.h
60652 +++ b/include/linux/rfkill.h
60653 @@ -147,6 +147,7 @@ struct rfkill_ops {
60654 void (*query)(struct rfkill *rfkill, void *data);
60655 int (*set_block)(void *data, bool blocked);
60656 };
60657 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60658
60659 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60660 /**
60661 diff --git a/include/linux/rio.h b/include/linux/rio.h
60662 index 4d50611..c6858a2 100644
60663 --- a/include/linux/rio.h
60664 +++ b/include/linux/rio.h
60665 @@ -315,7 +315,7 @@ struct rio_ops {
60666 int mbox, void *buffer, size_t len);
60667 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
60668 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
60669 -};
60670 +} __no_const;
60671
60672 #define RIO_RESOURCE_MEM 0x00000100
60673 #define RIO_RESOURCE_DOORBELL 0x00000200
60674 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
60675 index 2148b12..519b820 100644
60676 --- a/include/linux/rmap.h
60677 +++ b/include/linux/rmap.h
60678 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
60679 void anon_vma_init(void); /* create anon_vma_cachep */
60680 int anon_vma_prepare(struct vm_area_struct *);
60681 void unlink_anon_vmas(struct vm_area_struct *);
60682 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60683 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60684 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60685 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60686 void __anon_vma_link(struct vm_area_struct *);
60687
60688 static inline void anon_vma_merge(struct vm_area_struct *vma,
60689 diff --git a/include/linux/sched.h b/include/linux/sched.h
60690 index 1c4f3e9..b4e4851 100644
60691 --- a/include/linux/sched.h
60692 +++ b/include/linux/sched.h
60693 @@ -101,6 +101,7 @@ struct bio_list;
60694 struct fs_struct;
60695 struct perf_event_context;
60696 struct blk_plug;
60697 +struct linux_binprm;
60698
60699 /*
60700 * List of flags we want to share for kernel threads,
60701 @@ -380,10 +381,13 @@ struct user_namespace;
60702 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60703
60704 extern int sysctl_max_map_count;
60705 +extern unsigned long sysctl_heap_stack_gap;
60706
60707 #include <linux/aio.h>
60708
60709 #ifdef CONFIG_MMU
60710 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60711 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60712 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60713 extern unsigned long
60714 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60715 @@ -629,6 +633,17 @@ struct signal_struct {
60716 #ifdef CONFIG_TASKSTATS
60717 struct taskstats *stats;
60718 #endif
60719 +
60720 +#ifdef CONFIG_GRKERNSEC
60721 + u32 curr_ip;
60722 + u32 saved_ip;
60723 + u32 gr_saddr;
60724 + u32 gr_daddr;
60725 + u16 gr_sport;
60726 + u16 gr_dport;
60727 + u8 used_accept:1;
60728 +#endif
60729 +
60730 #ifdef CONFIG_AUDIT
60731 unsigned audit_tty;
60732 struct tty_audit_buf *tty_audit_buf;
60733 @@ -710,6 +725,11 @@ struct user_struct {
60734 struct key *session_keyring; /* UID's default session keyring */
60735 #endif
60736
60737 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60738 + unsigned int banned;
60739 + unsigned long ban_expires;
60740 +#endif
60741 +
60742 /* Hash table maintenance information */
60743 struct hlist_node uidhash_node;
60744 uid_t uid;
60745 @@ -1337,8 +1357,8 @@ struct task_struct {
60746 struct list_head thread_group;
60747
60748 struct completion *vfork_done; /* for vfork() */
60749 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60750 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60751 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60752 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60753
60754 cputime_t utime, stime, utimescaled, stimescaled;
60755 cputime_t gtime;
60756 @@ -1354,13 +1374,6 @@ struct task_struct {
60757 struct task_cputime cputime_expires;
60758 struct list_head cpu_timers[3];
60759
60760 -/* process credentials */
60761 - const struct cred __rcu *real_cred; /* objective and real subjective task
60762 - * credentials (COW) */
60763 - const struct cred __rcu *cred; /* effective (overridable) subjective task
60764 - * credentials (COW) */
60765 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60766 -
60767 char comm[TASK_COMM_LEN]; /* executable name excluding path
60768 - access with [gs]et_task_comm (which lock
60769 it with task_lock())
60770 @@ -1377,8 +1390,16 @@ struct task_struct {
60771 #endif
60772 /* CPU-specific state of this task */
60773 struct thread_struct thread;
60774 +/* thread_info moved to task_struct */
60775 +#ifdef CONFIG_X86
60776 + struct thread_info tinfo;
60777 +#endif
60778 /* filesystem information */
60779 struct fs_struct *fs;
60780 +
60781 + const struct cred __rcu *cred; /* effective (overridable) subjective task
60782 + * credentials (COW) */
60783 +
60784 /* open file information */
60785 struct files_struct *files;
60786 /* namespaces */
60787 @@ -1425,6 +1446,11 @@ struct task_struct {
60788 struct rt_mutex_waiter *pi_blocked_on;
60789 #endif
60790
60791 +/* process credentials */
60792 + const struct cred __rcu *real_cred; /* objective and real subjective task
60793 + * credentials (COW) */
60794 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60795 +
60796 #ifdef CONFIG_DEBUG_MUTEXES
60797 /* mutex deadlock detection */
60798 struct mutex_waiter *blocked_on;
60799 @@ -1540,6 +1566,27 @@ struct task_struct {
60800 unsigned long default_timer_slack_ns;
60801
60802 struct list_head *scm_work_list;
60803 +
60804 +#ifdef CONFIG_GRKERNSEC
60805 + /* grsecurity */
60806 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60807 + u64 exec_id;
60808 +#endif
60809 +#ifdef CONFIG_GRKERNSEC_SETXID
60810 + const struct cred *delayed_cred;
60811 +#endif
60812 + struct dentry *gr_chroot_dentry;
60813 + struct acl_subject_label *acl;
60814 + struct acl_role_label *role;
60815 + struct file *exec_file;
60816 + u16 acl_role_id;
60817 + /* is this the task that authenticated to the special role */
60818 + u8 acl_sp_role;
60819 + u8 is_writable;
60820 + u8 brute;
60821 + u8 gr_is_chrooted;
60822 +#endif
60823 +
60824 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60825 /* Index of current stored address in ret_stack */
60826 int curr_ret_stack;
60827 @@ -1574,6 +1621,51 @@ struct task_struct {
60828 #endif
60829 };
60830
60831 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60832 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60833 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60834 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60835 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60836 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60837 +
60838 +#ifdef CONFIG_PAX_SOFTMODE
60839 +extern int pax_softmode;
60840 +#endif
60841 +
60842 +extern int pax_check_flags(unsigned long *);
60843 +
60844 +/* if tsk != current then task_lock must be held on it */
60845 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60846 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
60847 +{
60848 + if (likely(tsk->mm))
60849 + return tsk->mm->pax_flags;
60850 + else
60851 + return 0UL;
60852 +}
60853 +
60854 +/* if tsk != current then task_lock must be held on it */
60855 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60856 +{
60857 + if (likely(tsk->mm)) {
60858 + tsk->mm->pax_flags = flags;
60859 + return 0;
60860 + }
60861 + return -EINVAL;
60862 +}
60863 +#endif
60864 +
60865 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60866 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
60867 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60868 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60869 +#endif
60870 +
60871 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60872 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60873 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
60874 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60875 +
60876 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60877 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60878
60879 @@ -2081,7 +2173,9 @@ void yield(void);
60880 extern struct exec_domain default_exec_domain;
60881
60882 union thread_union {
60883 +#ifndef CONFIG_X86
60884 struct thread_info thread_info;
60885 +#endif
60886 unsigned long stack[THREAD_SIZE/sizeof(long)];
60887 };
60888
60889 @@ -2114,6 +2208,7 @@ extern struct pid_namespace init_pid_ns;
60890 */
60891
60892 extern struct task_struct *find_task_by_vpid(pid_t nr);
60893 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60894 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60895 struct pid_namespace *ns);
60896
60897 @@ -2235,6 +2330,12 @@ static inline void mmdrop(struct mm_struct * mm)
60898 extern void mmput(struct mm_struct *);
60899 /* Grab a reference to a task's mm, if it is not already going away */
60900 extern struct mm_struct *get_task_mm(struct task_struct *task);
60901 +/*
60902 + * Grab a reference to a task's mm, if it is not already going away
60903 + * and ptrace_may_access with the mode parameter passed to it
60904 + * succeeds.
60905 + */
60906 +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
60907 /* Remove the current tasks stale references to the old mm_struct */
60908 extern void mm_release(struct task_struct *, struct mm_struct *);
60909 /* Allocate a new mm structure and copy contents from tsk->mm */
60910 @@ -2251,7 +2352,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
60911 extern void exit_itimers(struct signal_struct *);
60912 extern void flush_itimer_signals(void);
60913
60914 -extern NORET_TYPE void do_group_exit(int);
60915 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60916
60917 extern void daemonize(const char *, ...);
60918 extern int allow_signal(int);
60919 @@ -2416,13 +2517,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
60920
60921 #endif
60922
60923 -static inline int object_is_on_stack(void *obj)
60924 +static inline int object_starts_on_stack(void *obj)
60925 {
60926 - void *stack = task_stack_page(current);
60927 + const void *stack = task_stack_page(current);
60928
60929 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60930 }
60931
60932 +#ifdef CONFIG_PAX_USERCOPY
60933 +extern int object_is_on_stack(const void *obj, unsigned long len);
60934 +#endif
60935 +
60936 extern void thread_info_cache_init(void);
60937
60938 #ifdef CONFIG_DEBUG_STACK_USAGE
60939 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
60940 index 899fbb4..1cb4138 100644
60941 --- a/include/linux/screen_info.h
60942 +++ b/include/linux/screen_info.h
60943 @@ -43,7 +43,8 @@ struct screen_info {
60944 __u16 pages; /* 0x32 */
60945 __u16 vesa_attributes; /* 0x34 */
60946 __u32 capabilities; /* 0x36 */
60947 - __u8 _reserved[6]; /* 0x3a */
60948 + __u16 vesapm_size; /* 0x3a */
60949 + __u8 _reserved[4]; /* 0x3c */
60950 } __attribute__((packed));
60951
60952 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60953 diff --git a/include/linux/security.h b/include/linux/security.h
60954 index e8c619d..e0cbd1c 100644
60955 --- a/include/linux/security.h
60956 +++ b/include/linux/security.h
60957 @@ -37,6 +37,7 @@
60958 #include <linux/xfrm.h>
60959 #include <linux/slab.h>
60960 #include <linux/xattr.h>
60961 +#include <linux/grsecurity.h>
60962 #include <net/flow.h>
60963
60964 /* Maximum number of letters for an LSM name string */
60965 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
60966 index 0b69a46..b2ffa4c 100644
60967 --- a/include/linux/seq_file.h
60968 +++ b/include/linux/seq_file.h
60969 @@ -24,6 +24,9 @@ struct seq_file {
60970 struct mutex lock;
60971 const struct seq_operations *op;
60972 int poll_event;
60973 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60974 + u64 exec_id;
60975 +#endif
60976 void *private;
60977 };
60978
60979 @@ -33,6 +36,7 @@ struct seq_operations {
60980 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60981 int (*show) (struct seq_file *m, void *v);
60982 };
60983 +typedef struct seq_operations __no_const seq_operations_no_const;
60984
60985 #define SEQ_SKIP 1
60986
60987 diff --git a/include/linux/shm.h b/include/linux/shm.h
60988 index 92808b8..c28cac4 100644
60989 --- a/include/linux/shm.h
60990 +++ b/include/linux/shm.h
60991 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
60992
60993 /* The task created the shm object. NULL if the task is dead. */
60994 struct task_struct *shm_creator;
60995 +#ifdef CONFIG_GRKERNSEC
60996 + time_t shm_createtime;
60997 + pid_t shm_lapid;
60998 +#endif
60999 };
61000
61001 /* shm_mode upper byte flags */
61002 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
61003 index fe86488..1563c1c 100644
61004 --- a/include/linux/skbuff.h
61005 +++ b/include/linux/skbuff.h
61006 @@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
61007 */
61008 static inline int skb_queue_empty(const struct sk_buff_head *list)
61009 {
61010 - return list->next == (struct sk_buff *)list;
61011 + return list->next == (const struct sk_buff *)list;
61012 }
61013
61014 /**
61015 @@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
61016 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
61017 const struct sk_buff *skb)
61018 {
61019 - return skb->next == (struct sk_buff *)list;
61020 + return skb->next == (const struct sk_buff *)list;
61021 }
61022
61023 /**
61024 @@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
61025 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
61026 const struct sk_buff *skb)
61027 {
61028 - return skb->prev == (struct sk_buff *)list;
61029 + return skb->prev == (const struct sk_buff *)list;
61030 }
61031
61032 /**
61033 @@ -1523,7 +1523,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
61034 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
61035 */
61036 #ifndef NET_SKB_PAD
61037 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
61038 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
61039 #endif
61040
61041 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
61042 diff --git a/include/linux/slab.h b/include/linux/slab.h
61043 index 573c809..e84c132 100644
61044 --- a/include/linux/slab.h
61045 +++ b/include/linux/slab.h
61046 @@ -11,12 +11,20 @@
61047
61048 #include <linux/gfp.h>
61049 #include <linux/types.h>
61050 +#include <linux/err.h>
61051
61052 /*
61053 * Flags to pass to kmem_cache_create().
61054 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
61055 */
61056 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
61057 +
61058 +#ifdef CONFIG_PAX_USERCOPY
61059 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
61060 +#else
61061 +#define SLAB_USERCOPY 0x00000000UL
61062 +#endif
61063 +
61064 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
61065 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
61066 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
61067 @@ -87,10 +95,13 @@
61068 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
61069 * Both make kfree a no-op.
61070 */
61071 -#define ZERO_SIZE_PTR ((void *)16)
61072 +#define ZERO_SIZE_PTR \
61073 +({ \
61074 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
61075 + (void *)(-MAX_ERRNO-1L); \
61076 +})
61077
61078 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
61079 - (unsigned long)ZERO_SIZE_PTR)
61080 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
61081
61082 /*
61083 * struct kmem_cache related prototypes
61084 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
61085 void kfree(const void *);
61086 void kzfree(const void *);
61087 size_t ksize(const void *);
61088 +void check_object_size(const void *ptr, unsigned long n, bool to);
61089
61090 /*
61091 * Allocator specific definitions. These are mainly used to establish optimized
61092 @@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
61093
61094 void __init kmem_cache_init_late(void);
61095
61096 +#define kmalloc(x, y) \
61097 +({ \
61098 + void *___retval; \
61099 + intoverflow_t ___x = (intoverflow_t)x; \
61100 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
61101 + ___retval = NULL; \
61102 + else \
61103 + ___retval = kmalloc((size_t)___x, (y)); \
61104 + ___retval; \
61105 +})
61106 +
61107 +#define kmalloc_node(x, y, z) \
61108 +({ \
61109 + void *___retval; \
61110 + intoverflow_t ___x = (intoverflow_t)x; \
61111 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
61112 + ___retval = NULL; \
61113 + else \
61114 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
61115 + ___retval; \
61116 +})
61117 +
61118 +#define kzalloc(x, y) \
61119 +({ \
61120 + void *___retval; \
61121 + intoverflow_t ___x = (intoverflow_t)x; \
61122 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
61123 + ___retval = NULL; \
61124 + else \
61125 + ___retval = kzalloc((size_t)___x, (y)); \
61126 + ___retval; \
61127 +})
61128 +
61129 +#define __krealloc(x, y, z) \
61130 +({ \
61131 + void *___retval; \
61132 + intoverflow_t ___y = (intoverflow_t)y; \
61133 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
61134 + ___retval = NULL; \
61135 + else \
61136 + ___retval = __krealloc((x), (size_t)___y, (z)); \
61137 + ___retval; \
61138 +})
61139 +
61140 +#define krealloc(x, y, z) \
61141 +({ \
61142 + void *___retval; \
61143 + intoverflow_t ___y = (intoverflow_t)y; \
61144 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
61145 + ___retval = NULL; \
61146 + else \
61147 + ___retval = krealloc((x), (size_t)___y, (z)); \
61148 + ___retval; \
61149 +})
61150 +
61151 #endif /* _LINUX_SLAB_H */
61152 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
61153 index d00e0ba..1b3bf7b 100644
61154 --- a/include/linux/slab_def.h
61155 +++ b/include/linux/slab_def.h
61156 @@ -68,10 +68,10 @@ struct kmem_cache {
61157 unsigned long node_allocs;
61158 unsigned long node_frees;
61159 unsigned long node_overflow;
61160 - atomic_t allochit;
61161 - atomic_t allocmiss;
61162 - atomic_t freehit;
61163 - atomic_t freemiss;
61164 + atomic_unchecked_t allochit;
61165 + atomic_unchecked_t allocmiss;
61166 + atomic_unchecked_t freehit;
61167 + atomic_unchecked_t freemiss;
61168
61169 /*
61170 * If debugging is enabled, then the allocator can add additional
61171 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
61172 index a32bcfd..53b71f4 100644
61173 --- a/include/linux/slub_def.h
61174 +++ b/include/linux/slub_def.h
61175 @@ -89,7 +89,7 @@ struct kmem_cache {
61176 struct kmem_cache_order_objects max;
61177 struct kmem_cache_order_objects min;
61178 gfp_t allocflags; /* gfp flags to use on each alloc */
61179 - int refcount; /* Refcount for slab cache destroy */
61180 + atomic_t refcount; /* Refcount for slab cache destroy */
61181 void (*ctor)(void *);
61182 int inuse; /* Offset to metadata */
61183 int align; /* Alignment */
61184 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
61185 }
61186
61187 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
61188 -void *__kmalloc(size_t size, gfp_t flags);
61189 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
61190
61191 static __always_inline void *
61192 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
61193 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
61194 index de8832d..0147b46 100644
61195 --- a/include/linux/sonet.h
61196 +++ b/include/linux/sonet.h
61197 @@ -61,7 +61,7 @@ struct sonet_stats {
61198 #include <linux/atomic.h>
61199
61200 struct k_sonet_stats {
61201 -#define __HANDLE_ITEM(i) atomic_t i
61202 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
61203 __SONET_ITEMS
61204 #undef __HANDLE_ITEM
61205 };
61206 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
61207 index 3d8f9c4..69f1c0a 100644
61208 --- a/include/linux/sunrpc/clnt.h
61209 +++ b/include/linux/sunrpc/clnt.h
61210 @@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
61211 {
61212 switch (sap->sa_family) {
61213 case AF_INET:
61214 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
61215 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
61216 case AF_INET6:
61217 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
61218 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
61219 }
61220 return 0;
61221 }
61222 @@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
61223 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
61224 const struct sockaddr *src)
61225 {
61226 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
61227 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
61228 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
61229
61230 dsin->sin_family = ssin->sin_family;
61231 @@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
61232 if (sa->sa_family != AF_INET6)
61233 return 0;
61234
61235 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
61236 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
61237 }
61238
61239 #endif /* __KERNEL__ */
61240 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
61241 index e775689..9e206d9 100644
61242 --- a/include/linux/sunrpc/sched.h
61243 +++ b/include/linux/sunrpc/sched.h
61244 @@ -105,6 +105,7 @@ struct rpc_call_ops {
61245 void (*rpc_call_done)(struct rpc_task *, void *);
61246 void (*rpc_release)(void *);
61247 };
61248 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
61249
61250 struct rpc_task_setup {
61251 struct rpc_task *task;
61252 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
61253 index c14fe86..393245e 100644
61254 --- a/include/linux/sunrpc/svc_rdma.h
61255 +++ b/include/linux/sunrpc/svc_rdma.h
61256 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
61257 extern unsigned int svcrdma_max_requests;
61258 extern unsigned int svcrdma_max_req_size;
61259
61260 -extern atomic_t rdma_stat_recv;
61261 -extern atomic_t rdma_stat_read;
61262 -extern atomic_t rdma_stat_write;
61263 -extern atomic_t rdma_stat_sq_starve;
61264 -extern atomic_t rdma_stat_rq_starve;
61265 -extern atomic_t rdma_stat_rq_poll;
61266 -extern atomic_t rdma_stat_rq_prod;
61267 -extern atomic_t rdma_stat_sq_poll;
61268 -extern atomic_t rdma_stat_sq_prod;
61269 +extern atomic_unchecked_t rdma_stat_recv;
61270 +extern atomic_unchecked_t rdma_stat_read;
61271 +extern atomic_unchecked_t rdma_stat_write;
61272 +extern atomic_unchecked_t rdma_stat_sq_starve;
61273 +extern atomic_unchecked_t rdma_stat_rq_starve;
61274 +extern atomic_unchecked_t rdma_stat_rq_poll;
61275 +extern atomic_unchecked_t rdma_stat_rq_prod;
61276 +extern atomic_unchecked_t rdma_stat_sq_poll;
61277 +extern atomic_unchecked_t rdma_stat_sq_prod;
61278
61279 #define RPCRDMA_VERSION 1
61280
61281 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
61282 index 703cfa3..0b8ca72ac 100644
61283 --- a/include/linux/sysctl.h
61284 +++ b/include/linux/sysctl.h
61285 @@ -155,7 +155,11 @@ enum
61286 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
61287 };
61288
61289 -
61290 +#ifdef CONFIG_PAX_SOFTMODE
61291 +enum {
61292 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
61293 +};
61294 +#endif
61295
61296 /* CTL_VM names: */
61297 enum
61298 @@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
61299
61300 extern int proc_dostring(struct ctl_table *, int,
61301 void __user *, size_t *, loff_t *);
61302 +extern int proc_dostring_modpriv(struct ctl_table *, int,
61303 + void __user *, size_t *, loff_t *);
61304 extern int proc_dointvec(struct ctl_table *, int,
61305 void __user *, size_t *, loff_t *);
61306 extern int proc_dointvec_minmax(struct ctl_table *, int,
61307 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
61308 index a71a292..51bd91d 100644
61309 --- a/include/linux/tracehook.h
61310 +++ b/include/linux/tracehook.h
61311 @@ -54,12 +54,12 @@ struct linux_binprm;
61312 /*
61313 * ptrace report for syscall entry and exit looks identical.
61314 */
61315 -static inline void ptrace_report_syscall(struct pt_regs *regs)
61316 +static inline int ptrace_report_syscall(struct pt_regs *regs)
61317 {
61318 int ptrace = current->ptrace;
61319
61320 if (!(ptrace & PT_PTRACED))
61321 - return;
61322 + return 0;
61323
61324 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
61325
61326 @@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
61327 send_sig(current->exit_code, current, 1);
61328 current->exit_code = 0;
61329 }
61330 +
61331 + return fatal_signal_pending(current);
61332 }
61333
61334 /**
61335 @@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
61336 static inline __must_check int tracehook_report_syscall_entry(
61337 struct pt_regs *regs)
61338 {
61339 - ptrace_report_syscall(regs);
61340 - return 0;
61341 + return ptrace_report_syscall(regs);
61342 }
61343
61344 /**
61345 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
61346 index ff7dc08..893e1bd 100644
61347 --- a/include/linux/tty_ldisc.h
61348 +++ b/include/linux/tty_ldisc.h
61349 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
61350
61351 struct module *owner;
61352
61353 - int refcount;
61354 + atomic_t refcount;
61355 };
61356
61357 struct tty_ldisc {
61358 diff --git a/include/linux/types.h b/include/linux/types.h
61359 index 57a9723..dbe234a 100644
61360 --- a/include/linux/types.h
61361 +++ b/include/linux/types.h
61362 @@ -213,10 +213,26 @@ typedef struct {
61363 int counter;
61364 } atomic_t;
61365
61366 +#ifdef CONFIG_PAX_REFCOUNT
61367 +typedef struct {
61368 + int counter;
61369 +} atomic_unchecked_t;
61370 +#else
61371 +typedef atomic_t atomic_unchecked_t;
61372 +#endif
61373 +
61374 #ifdef CONFIG_64BIT
61375 typedef struct {
61376 long counter;
61377 } atomic64_t;
61378 +
61379 +#ifdef CONFIG_PAX_REFCOUNT
61380 +typedef struct {
61381 + long counter;
61382 +} atomic64_unchecked_t;
61383 +#else
61384 +typedef atomic64_t atomic64_unchecked_t;
61385 +#endif
61386 #endif
61387
61388 struct list_head {
61389 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
61390 index 5ca0951..ab496a5 100644
61391 --- a/include/linux/uaccess.h
61392 +++ b/include/linux/uaccess.h
61393 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
61394 long ret; \
61395 mm_segment_t old_fs = get_fs(); \
61396 \
61397 - set_fs(KERNEL_DS); \
61398 pagefault_disable(); \
61399 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
61400 - pagefault_enable(); \
61401 + set_fs(KERNEL_DS); \
61402 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
61403 set_fs(old_fs); \
61404 + pagefault_enable(); \
61405 ret; \
61406 })
61407
61408 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
61409 index 99c1b4d..bb94261 100644
61410 --- a/include/linux/unaligned/access_ok.h
61411 +++ b/include/linux/unaligned/access_ok.h
61412 @@ -6,32 +6,32 @@
61413
61414 static inline u16 get_unaligned_le16(const void *p)
61415 {
61416 - return le16_to_cpup((__le16 *)p);
61417 + return le16_to_cpup((const __le16 *)p);
61418 }
61419
61420 static inline u32 get_unaligned_le32(const void *p)
61421 {
61422 - return le32_to_cpup((__le32 *)p);
61423 + return le32_to_cpup((const __le32 *)p);
61424 }
61425
61426 static inline u64 get_unaligned_le64(const void *p)
61427 {
61428 - return le64_to_cpup((__le64 *)p);
61429 + return le64_to_cpup((const __le64 *)p);
61430 }
61431
61432 static inline u16 get_unaligned_be16(const void *p)
61433 {
61434 - return be16_to_cpup((__be16 *)p);
61435 + return be16_to_cpup((const __be16 *)p);
61436 }
61437
61438 static inline u32 get_unaligned_be32(const void *p)
61439 {
61440 - return be32_to_cpup((__be32 *)p);
61441 + return be32_to_cpup((const __be32 *)p);
61442 }
61443
61444 static inline u64 get_unaligned_be64(const void *p)
61445 {
61446 - return be64_to_cpup((__be64 *)p);
61447 + return be64_to_cpup((const __be64 *)p);
61448 }
61449
61450 static inline void put_unaligned_le16(u16 val, void *p)
61451 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
61452 index e5a40c3..20ab0f6 100644
61453 --- a/include/linux/usb/renesas_usbhs.h
61454 +++ b/include/linux/usb/renesas_usbhs.h
61455 @@ -39,7 +39,7 @@ enum {
61456 */
61457 struct renesas_usbhs_driver_callback {
61458 int (*notify_hotplug)(struct platform_device *pdev);
61459 -};
61460 +} __no_const;
61461
61462 /*
61463 * callback functions for platform
61464 @@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback {
61465 * VBUS control is needed for Host
61466 */
61467 int (*set_vbus)(struct platform_device *pdev, int enable);
61468 -};
61469 +} __no_const;
61470
61471 /*
61472 * parameters for renesas usbhs
61473 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
61474 index 6f8fbcf..8259001 100644
61475 --- a/include/linux/vermagic.h
61476 +++ b/include/linux/vermagic.h
61477 @@ -25,9 +25,35 @@
61478 #define MODULE_ARCH_VERMAGIC ""
61479 #endif
61480
61481 +#ifdef CONFIG_PAX_REFCOUNT
61482 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
61483 +#else
61484 +#define MODULE_PAX_REFCOUNT ""
61485 +#endif
61486 +
61487 +#ifdef CONSTIFY_PLUGIN
61488 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
61489 +#else
61490 +#define MODULE_CONSTIFY_PLUGIN ""
61491 +#endif
61492 +
61493 +#ifdef STACKLEAK_PLUGIN
61494 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
61495 +#else
61496 +#define MODULE_STACKLEAK_PLUGIN ""
61497 +#endif
61498 +
61499 +#ifdef CONFIG_GRKERNSEC
61500 +#define MODULE_GRSEC "GRSEC "
61501 +#else
61502 +#define MODULE_GRSEC ""
61503 +#endif
61504 +
61505 #define VERMAGIC_STRING \
61506 UTS_RELEASE " " \
61507 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
61508 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
61509 - MODULE_ARCH_VERMAGIC
61510 + MODULE_ARCH_VERMAGIC \
61511 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
61512 + MODULE_GRSEC
61513
61514 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
61515 index 4bde182..aec92c1 100644
61516 --- a/include/linux/vmalloc.h
61517 +++ b/include/linux/vmalloc.h
61518 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
61519 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61520 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61521 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
61522 +
61523 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61524 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
61525 +#endif
61526 +
61527 /* bits [20..32] reserved for arch specific ioremap internals */
61528
61529 /*
61530 @@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
61531 # endif
61532 #endif
61533
61534 +#define vmalloc(x) \
61535 +({ \
61536 + void *___retval; \
61537 + intoverflow_t ___x = (intoverflow_t)x; \
61538 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61539 + ___retval = NULL; \
61540 + else \
61541 + ___retval = vmalloc((unsigned long)___x); \
61542 + ___retval; \
61543 +})
61544 +
61545 +#define vzalloc(x) \
61546 +({ \
61547 + void *___retval; \
61548 + intoverflow_t ___x = (intoverflow_t)x; \
61549 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
61550 + ___retval = NULL; \
61551 + else \
61552 + ___retval = vzalloc((unsigned long)___x); \
61553 + ___retval; \
61554 +})
61555 +
61556 +#define __vmalloc(x, y, z) \
61557 +({ \
61558 + void *___retval; \
61559 + intoverflow_t ___x = (intoverflow_t)x; \
61560 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61561 + ___retval = NULL; \
61562 + else \
61563 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61564 + ___retval; \
61565 +})
61566 +
61567 +#define vmalloc_user(x) \
61568 +({ \
61569 + void *___retval; \
61570 + intoverflow_t ___x = (intoverflow_t)x; \
61571 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61572 + ___retval = NULL; \
61573 + else \
61574 + ___retval = vmalloc_user((unsigned long)___x); \
61575 + ___retval; \
61576 +})
61577 +
61578 +#define vmalloc_exec(x) \
61579 +({ \
61580 + void *___retval; \
61581 + intoverflow_t ___x = (intoverflow_t)x; \
61582 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61583 + ___retval = NULL; \
61584 + else \
61585 + ___retval = vmalloc_exec((unsigned long)___x); \
61586 + ___retval; \
61587 +})
61588 +
61589 +#define vmalloc_node(x, y) \
61590 +({ \
61591 + void *___retval; \
61592 + intoverflow_t ___x = (intoverflow_t)x; \
61593 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61594 + ___retval = NULL; \
61595 + else \
61596 + ___retval = vmalloc_node((unsigned long)___x, (y));\
61597 + ___retval; \
61598 +})
61599 +
61600 +#define vzalloc_node(x, y) \
61601 +({ \
61602 + void *___retval; \
61603 + intoverflow_t ___x = (intoverflow_t)x; \
61604 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
61605 + ___retval = NULL; \
61606 + else \
61607 + ___retval = vzalloc_node((unsigned long)___x, (y));\
61608 + ___retval; \
61609 +})
61610 +
61611 +#define vmalloc_32(x) \
61612 +({ \
61613 + void *___retval; \
61614 + intoverflow_t ___x = (intoverflow_t)x; \
61615 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61616 + ___retval = NULL; \
61617 + else \
61618 + ___retval = vmalloc_32((unsigned long)___x); \
61619 + ___retval; \
61620 +})
61621 +
61622 +#define vmalloc_32_user(x) \
61623 +({ \
61624 +void *___retval; \
61625 + intoverflow_t ___x = (intoverflow_t)x; \
61626 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61627 + ___retval = NULL; \
61628 + else \
61629 + ___retval = vmalloc_32_user((unsigned long)___x);\
61630 + ___retval; \
61631 +})
61632 +
61633 #endif /* _LINUX_VMALLOC_H */
61634 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
61635 index 65efb92..137adbb 100644
61636 --- a/include/linux/vmstat.h
61637 +++ b/include/linux/vmstat.h
61638 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
61639 /*
61640 * Zone based page accounting with per cpu differentials.
61641 */
61642 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61643 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61644
61645 static inline void zone_page_state_add(long x, struct zone *zone,
61646 enum zone_stat_item item)
61647 {
61648 - atomic_long_add(x, &zone->vm_stat[item]);
61649 - atomic_long_add(x, &vm_stat[item]);
61650 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61651 + atomic_long_add_unchecked(x, &vm_stat[item]);
61652 }
61653
61654 static inline unsigned long global_page_state(enum zone_stat_item item)
61655 {
61656 - long x = atomic_long_read(&vm_stat[item]);
61657 + long x = atomic_long_read_unchecked(&vm_stat[item]);
61658 #ifdef CONFIG_SMP
61659 if (x < 0)
61660 x = 0;
61661 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
61662 static inline unsigned long zone_page_state(struct zone *zone,
61663 enum zone_stat_item item)
61664 {
61665 - long x = atomic_long_read(&zone->vm_stat[item]);
61666 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61667 #ifdef CONFIG_SMP
61668 if (x < 0)
61669 x = 0;
61670 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
61671 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61672 enum zone_stat_item item)
61673 {
61674 - long x = atomic_long_read(&zone->vm_stat[item]);
61675 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61676
61677 #ifdef CONFIG_SMP
61678 int cpu;
61679 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
61680
61681 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61682 {
61683 - atomic_long_inc(&zone->vm_stat[item]);
61684 - atomic_long_inc(&vm_stat[item]);
61685 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
61686 + atomic_long_inc_unchecked(&vm_stat[item]);
61687 }
61688
61689 static inline void __inc_zone_page_state(struct page *page,
61690 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
61691
61692 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61693 {
61694 - atomic_long_dec(&zone->vm_stat[item]);
61695 - atomic_long_dec(&vm_stat[item]);
61696 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
61697 + atomic_long_dec_unchecked(&vm_stat[item]);
61698 }
61699
61700 static inline void __dec_zone_page_state(struct page *page,
61701 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
61702 index e5d1220..ef6e406 100644
61703 --- a/include/linux/xattr.h
61704 +++ b/include/linux/xattr.h
61705 @@ -57,6 +57,11 @@
61706 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
61707 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
61708
61709 +/* User namespace */
61710 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
61711 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
61712 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
61713 +
61714 #ifdef __KERNEL__
61715
61716 #include <linux/types.h>
61717 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
61718 index 4aeff96..b378cdc 100644
61719 --- a/include/media/saa7146_vv.h
61720 +++ b/include/media/saa7146_vv.h
61721 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
61722 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61723
61724 /* the extension can override this */
61725 - struct v4l2_ioctl_ops ops;
61726 + v4l2_ioctl_ops_no_const ops;
61727 /* pointer to the saa7146 core ops */
61728 const struct v4l2_ioctl_ops *core_ops;
61729
61730 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
61731 index c7c40f1..4f01585 100644
61732 --- a/include/media/v4l2-dev.h
61733 +++ b/include/media/v4l2-dev.h
61734 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
61735
61736
61737 struct v4l2_file_operations {
61738 - struct module *owner;
61739 + struct module * const owner;
61740 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61741 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61742 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61743 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
61744 int (*open) (struct file *);
61745 int (*release) (struct file *);
61746 };
61747 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61748
61749 /*
61750 * Newer version of video_device, handled by videodev2.c
61751 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
61752 index 4d1c74a..65e1221 100644
61753 --- a/include/media/v4l2-ioctl.h
61754 +++ b/include/media/v4l2-ioctl.h
61755 @@ -274,7 +274,7 @@ struct v4l2_ioctl_ops {
61756 long (*vidioc_default) (struct file *file, void *fh,
61757 bool valid_prio, int cmd, void *arg);
61758 };
61759 -
61760 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61761
61762 /* v4l debugging and diagnostics */
61763
61764 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
61765 index 8d55251..dfe5b0a 100644
61766 --- a/include/net/caif/caif_hsi.h
61767 +++ b/include/net/caif/caif_hsi.h
61768 @@ -98,7 +98,7 @@ struct cfhsi_drv {
61769 void (*rx_done_cb) (struct cfhsi_drv *drv);
61770 void (*wake_up_cb) (struct cfhsi_drv *drv);
61771 void (*wake_down_cb) (struct cfhsi_drv *drv);
61772 -};
61773 +} __no_const;
61774
61775 /* Structure implemented by HSI device. */
61776 struct cfhsi_dev {
61777 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
61778 index 9e5425b..8136ffc 100644
61779 --- a/include/net/caif/cfctrl.h
61780 +++ b/include/net/caif/cfctrl.h
61781 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
61782 void (*radioset_rsp)(void);
61783 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61784 struct cflayer *client_layer);
61785 -};
61786 +} __no_const;
61787
61788 /* Link Setup Parameters for CAIF-Links. */
61789 struct cfctrl_link_param {
61790 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
61791 struct cfctrl {
61792 struct cfsrvl serv;
61793 struct cfctrl_rsp res;
61794 - atomic_t req_seq_no;
61795 - atomic_t rsp_seq_no;
61796 + atomic_unchecked_t req_seq_no;
61797 + atomic_unchecked_t rsp_seq_no;
61798 struct list_head list;
61799 /* Protects from simultaneous access to first_req list */
61800 spinlock_t info_list_lock;
61801 diff --git a/include/net/flow.h b/include/net/flow.h
61802 index 57f15a7..0de26c6 100644
61803 --- a/include/net/flow.h
61804 +++ b/include/net/flow.h
61805 @@ -208,6 +208,6 @@ extern struct flow_cache_object *flow_cache_lookup(
61806
61807 extern void flow_cache_flush(void);
61808 extern void flow_cache_flush_deferred(void);
61809 -extern atomic_t flow_cache_genid;
61810 +extern atomic_unchecked_t flow_cache_genid;
61811
61812 #endif
61813 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
61814 index e9ff3fc..9d3e5c7 100644
61815 --- a/include/net/inetpeer.h
61816 +++ b/include/net/inetpeer.h
61817 @@ -48,8 +48,8 @@ struct inet_peer {
61818 */
61819 union {
61820 struct {
61821 - atomic_t rid; /* Frag reception counter */
61822 - atomic_t ip_id_count; /* IP ID for the next packet */
61823 + atomic_unchecked_t rid; /* Frag reception counter */
61824 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61825 __u32 tcp_ts;
61826 __u32 tcp_ts_stamp;
61827 };
61828 @@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
61829 more++;
61830 inet_peer_refcheck(p);
61831 do {
61832 - old = atomic_read(&p->ip_id_count);
61833 + old = atomic_read_unchecked(&p->ip_id_count);
61834 new = old + more;
61835 if (!new)
61836 new = 1;
61837 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61838 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61839 return new;
61840 }
61841
61842 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
61843 index 10422ef..662570f 100644
61844 --- a/include/net/ip_fib.h
61845 +++ b/include/net/ip_fib.h
61846 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
61847
61848 #define FIB_RES_SADDR(net, res) \
61849 ((FIB_RES_NH(res).nh_saddr_genid == \
61850 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61851 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61852 FIB_RES_NH(res).nh_saddr : \
61853 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61854 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61855 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
61856 index e5a7b9a..f4fc44b 100644
61857 --- a/include/net/ip_vs.h
61858 +++ b/include/net/ip_vs.h
61859 @@ -509,7 +509,7 @@ struct ip_vs_conn {
61860 struct ip_vs_conn *control; /* Master control connection */
61861 atomic_t n_control; /* Number of controlled ones */
61862 struct ip_vs_dest *dest; /* real server */
61863 - atomic_t in_pkts; /* incoming packet counter */
61864 + atomic_unchecked_t in_pkts; /* incoming packet counter */
61865
61866 /* packet transmitter for different forwarding methods. If it
61867 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61868 @@ -647,7 +647,7 @@ struct ip_vs_dest {
61869 __be16 port; /* port number of the server */
61870 union nf_inet_addr addr; /* IP address of the server */
61871 volatile unsigned flags; /* dest status flags */
61872 - atomic_t conn_flags; /* flags to copy to conn */
61873 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
61874 atomic_t weight; /* server weight */
61875
61876 atomic_t refcnt; /* reference counter */
61877 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
61878 index 69b610a..fe3962c 100644
61879 --- a/include/net/irda/ircomm_core.h
61880 +++ b/include/net/irda/ircomm_core.h
61881 @@ -51,7 +51,7 @@ typedef struct {
61882 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61883 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61884 struct ircomm_info *);
61885 -} call_t;
61886 +} __no_const call_t;
61887
61888 struct ircomm_cb {
61889 irda_queue_t queue;
61890 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
61891 index 59ba38bc..d515662 100644
61892 --- a/include/net/irda/ircomm_tty.h
61893 +++ b/include/net/irda/ircomm_tty.h
61894 @@ -35,6 +35,7 @@
61895 #include <linux/termios.h>
61896 #include <linux/timer.h>
61897 #include <linux/tty.h> /* struct tty_struct */
61898 +#include <asm/local.h>
61899
61900 #include <net/irda/irias_object.h>
61901 #include <net/irda/ircomm_core.h>
61902 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61903 unsigned short close_delay;
61904 unsigned short closing_wait; /* time to wait before closing */
61905
61906 - int open_count;
61907 - int blocked_open; /* # of blocked opens */
61908 + local_t open_count;
61909 + local_t blocked_open; /* # of blocked opens */
61910
61911 /* Protect concurent access to :
61912 * o self->open_count
61913 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
61914 index f2419cf..473679f 100644
61915 --- a/include/net/iucv/af_iucv.h
61916 +++ b/include/net/iucv/af_iucv.h
61917 @@ -139,7 +139,7 @@ struct iucv_sock {
61918 struct iucv_sock_list {
61919 struct hlist_head head;
61920 rwlock_t lock;
61921 - atomic_t autobind_name;
61922 + atomic_unchecked_t autobind_name;
61923 };
61924
61925 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61926 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
61927 index 2720884..3aa5c25 100644
61928 --- a/include/net/neighbour.h
61929 +++ b/include/net/neighbour.h
61930 @@ -122,7 +122,7 @@ struct neigh_ops {
61931 void (*error_report)(struct neighbour *, struct sk_buff *);
61932 int (*output)(struct neighbour *, struct sk_buff *);
61933 int (*connected_output)(struct neighbour *, struct sk_buff *);
61934 -};
61935 +} __do_const;
61936
61937 struct pneigh_entry {
61938 struct pneigh_entry *next;
61939 diff --git a/include/net/netlink.h b/include/net/netlink.h
61940 index cb1f350..3279d2c 100644
61941 --- a/include/net/netlink.h
61942 +++ b/include/net/netlink.h
61943 @@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
61944 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61945 {
61946 if (mark)
61947 - skb_trim(skb, (unsigned char *) mark - skb->data);
61948 + skb_trim(skb, (const unsigned char *) mark - skb->data);
61949 }
61950
61951 /**
61952 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
61953 index d786b4f..4c3dd41 100644
61954 --- a/include/net/netns/ipv4.h
61955 +++ b/include/net/netns/ipv4.h
61956 @@ -56,8 +56,8 @@ struct netns_ipv4 {
61957
61958 unsigned int sysctl_ping_group_range[2];
61959
61960 - atomic_t rt_genid;
61961 - atomic_t dev_addr_genid;
61962 + atomic_unchecked_t rt_genid;
61963 + atomic_unchecked_t dev_addr_genid;
61964
61965 #ifdef CONFIG_IP_MROUTE
61966 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61967 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
61968 index 6a72a58..e6a127d 100644
61969 --- a/include/net/sctp/sctp.h
61970 +++ b/include/net/sctp/sctp.h
61971 @@ -318,9 +318,9 @@ do { \
61972
61973 #else /* SCTP_DEBUG */
61974
61975 -#define SCTP_DEBUG_PRINTK(whatever...)
61976 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61977 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61978 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61979 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61980 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61981 #define SCTP_ENABLE_DEBUG
61982 #define SCTP_DISABLE_DEBUG
61983 #define SCTP_ASSERT(expr, str, func)
61984 diff --git a/include/net/sock.h b/include/net/sock.h
61985 index 32e3937..87a1dbc 100644
61986 --- a/include/net/sock.h
61987 +++ b/include/net/sock.h
61988 @@ -277,7 +277,7 @@ struct sock {
61989 #ifdef CONFIG_RPS
61990 __u32 sk_rxhash;
61991 #endif
61992 - atomic_t sk_drops;
61993 + atomic_unchecked_t sk_drops;
61994 int sk_rcvbuf;
61995
61996 struct sk_filter __rcu *sk_filter;
61997 @@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
61998 }
61999
62000 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
62001 - char __user *from, char *to,
62002 + char __user *from, unsigned char *to,
62003 int copy, int offset)
62004 {
62005 if (skb->ip_summed == CHECKSUM_NONE) {
62006 diff --git a/include/net/tcp.h b/include/net/tcp.h
62007 index bb18c4d..bb87972 100644
62008 --- a/include/net/tcp.h
62009 +++ b/include/net/tcp.h
62010 @@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo {
62011 char *name;
62012 sa_family_t family;
62013 const struct file_operations *seq_fops;
62014 - struct seq_operations seq_ops;
62015 + seq_operations_no_const seq_ops;
62016 };
62017
62018 struct tcp_iter_state {
62019 diff --git a/include/net/udp.h b/include/net/udp.h
62020 index 3b285f4..0219639 100644
62021 --- a/include/net/udp.h
62022 +++ b/include/net/udp.h
62023 @@ -237,7 +237,7 @@ struct udp_seq_afinfo {
62024 sa_family_t family;
62025 struct udp_table *udp_table;
62026 const struct file_operations *seq_fops;
62027 - struct seq_operations seq_ops;
62028 + seq_operations_no_const seq_ops;
62029 };
62030
62031 struct udp_iter_state {
62032 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
62033 index b203e14..1df3991 100644
62034 --- a/include/net/xfrm.h
62035 +++ b/include/net/xfrm.h
62036 @@ -505,7 +505,7 @@ struct xfrm_policy {
62037 struct timer_list timer;
62038
62039 struct flow_cache_object flo;
62040 - atomic_t genid;
62041 + atomic_unchecked_t genid;
62042 u32 priority;
62043 u32 index;
62044 struct xfrm_mark mark;
62045 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
62046 index 1a046b1..ee0bef0 100644
62047 --- a/include/rdma/iw_cm.h
62048 +++ b/include/rdma/iw_cm.h
62049 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
62050 int backlog);
62051
62052 int (*destroy_listen)(struct iw_cm_id *cm_id);
62053 -};
62054 +} __no_const;
62055
62056 /**
62057 * iw_create_cm_id - Create an IW CM identifier.
62058 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
62059 index 5d1a758..1dbf795 100644
62060 --- a/include/scsi/libfc.h
62061 +++ b/include/scsi/libfc.h
62062 @@ -748,6 +748,7 @@ struct libfc_function_template {
62063 */
62064 void (*disc_stop_final) (struct fc_lport *);
62065 };
62066 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
62067
62068 /**
62069 * struct fc_disc - Discovery context
62070 @@ -851,7 +852,7 @@ struct fc_lport {
62071 struct fc_vport *vport;
62072
62073 /* Operational Information */
62074 - struct libfc_function_template tt;
62075 + libfc_function_template_no_const tt;
62076 u8 link_up;
62077 u8 qfull;
62078 enum fc_lport_state state;
62079 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
62080 index 5591ed5..13eb457 100644
62081 --- a/include/scsi/scsi_device.h
62082 +++ b/include/scsi/scsi_device.h
62083 @@ -161,9 +161,9 @@ struct scsi_device {
62084 unsigned int max_device_blocked; /* what device_blocked counts down from */
62085 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
62086
62087 - atomic_t iorequest_cnt;
62088 - atomic_t iodone_cnt;
62089 - atomic_t ioerr_cnt;
62090 + atomic_unchecked_t iorequest_cnt;
62091 + atomic_unchecked_t iodone_cnt;
62092 + atomic_unchecked_t ioerr_cnt;
62093
62094 struct device sdev_gendev,
62095 sdev_dev;
62096 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
62097 index 2a65167..91e01f8 100644
62098 --- a/include/scsi/scsi_transport_fc.h
62099 +++ b/include/scsi/scsi_transport_fc.h
62100 @@ -711,7 +711,7 @@ struct fc_function_template {
62101 unsigned long show_host_system_hostname:1;
62102
62103 unsigned long disable_target_scan:1;
62104 -};
62105 +} __do_const;
62106
62107
62108 /**
62109 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
62110 index 030b87c..98a6954 100644
62111 --- a/include/sound/ak4xxx-adda.h
62112 +++ b/include/sound/ak4xxx-adda.h
62113 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
62114 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
62115 unsigned char val);
62116 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
62117 -};
62118 +} __no_const;
62119
62120 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
62121
62122 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
62123 index 8c05e47..2b5df97 100644
62124 --- a/include/sound/hwdep.h
62125 +++ b/include/sound/hwdep.h
62126 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
62127 struct snd_hwdep_dsp_status *status);
62128 int (*dsp_load)(struct snd_hwdep *hw,
62129 struct snd_hwdep_dsp_image *image);
62130 -};
62131 +} __no_const;
62132
62133 struct snd_hwdep {
62134 struct snd_card *card;
62135 diff --git a/include/sound/info.h b/include/sound/info.h
62136 index 5492cc4..1a65278 100644
62137 --- a/include/sound/info.h
62138 +++ b/include/sound/info.h
62139 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
62140 struct snd_info_buffer *buffer);
62141 void (*write)(struct snd_info_entry *entry,
62142 struct snd_info_buffer *buffer);
62143 -};
62144 +} __no_const;
62145
62146 struct snd_info_entry_ops {
62147 int (*open)(struct snd_info_entry *entry,
62148 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
62149 index 0cf91b2..b70cae4 100644
62150 --- a/include/sound/pcm.h
62151 +++ b/include/sound/pcm.h
62152 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
62153 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
62154 int (*ack)(struct snd_pcm_substream *substream);
62155 };
62156 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
62157
62158 /*
62159 *
62160 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
62161 index af1b49e..a5d55a5 100644
62162 --- a/include/sound/sb16_csp.h
62163 +++ b/include/sound/sb16_csp.h
62164 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
62165 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
62166 int (*csp_stop) (struct snd_sb_csp * p);
62167 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
62168 -};
62169 +} __no_const;
62170
62171 /*
62172 * CSP private data
62173 diff --git a/include/sound/soc.h b/include/sound/soc.h
62174 index 11cfb59..e3f93f4 100644
62175 --- a/include/sound/soc.h
62176 +++ b/include/sound/soc.h
62177 @@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
62178 /* platform IO - used for platform DAPM */
62179 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
62180 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
62181 -};
62182 +} __do_const;
62183
62184 struct snd_soc_platform {
62185 const char *name;
62186 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
62187 index 444cd6b..3327cc5 100644
62188 --- a/include/sound/ymfpci.h
62189 +++ b/include/sound/ymfpci.h
62190 @@ -358,7 +358,7 @@ struct snd_ymfpci {
62191 spinlock_t reg_lock;
62192 spinlock_t voice_lock;
62193 wait_queue_head_t interrupt_sleep;
62194 - atomic_t interrupt_sleep_count;
62195 + atomic_unchecked_t interrupt_sleep_count;
62196 struct snd_info_entry *proc_entry;
62197 const struct firmware *dsp_microcode;
62198 const struct firmware *controller_microcode;
62199 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
62200 index a79886c..b483af6 100644
62201 --- a/include/target/target_core_base.h
62202 +++ b/include/target/target_core_base.h
62203 @@ -346,7 +346,7 @@ struct t10_reservation_ops {
62204 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
62205 int (*t10_pr_register)(struct se_cmd *);
62206 int (*t10_pr_clear)(struct se_cmd *);
62207 -};
62208 +} __no_const;
62209
62210 struct t10_reservation {
62211 /* Reservation effects all target ports */
62212 @@ -465,8 +465,8 @@ struct se_cmd {
62213 atomic_t t_se_count;
62214 atomic_t t_task_cdbs_left;
62215 atomic_t t_task_cdbs_ex_left;
62216 - atomic_t t_task_cdbs_sent;
62217 - atomic_t t_transport_aborted;
62218 + atomic_unchecked_t t_task_cdbs_sent;
62219 + atomic_unchecked_t t_transport_aborted;
62220 atomic_t t_transport_active;
62221 atomic_t t_transport_complete;
62222 atomic_t t_transport_queue_active;
62223 @@ -704,7 +704,7 @@ struct se_device {
62224 /* Active commands on this virtual SE device */
62225 atomic_t simple_cmds;
62226 atomic_t depth_left;
62227 - atomic_t dev_ordered_id;
62228 + atomic_unchecked_t dev_ordered_id;
62229 atomic_t execute_tasks;
62230 atomic_t dev_ordered_sync;
62231 atomic_t dev_qf_count;
62232 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
62233 index 1c09820..7f5ec79 100644
62234 --- a/include/trace/events/irq.h
62235 +++ b/include/trace/events/irq.h
62236 @@ -36,7 +36,7 @@ struct softirq_action;
62237 */
62238 TRACE_EVENT(irq_handler_entry,
62239
62240 - TP_PROTO(int irq, struct irqaction *action),
62241 + TP_PROTO(int irq, const struct irqaction *action),
62242
62243 TP_ARGS(irq, action),
62244
62245 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
62246 */
62247 TRACE_EVENT(irq_handler_exit,
62248
62249 - TP_PROTO(int irq, struct irqaction *action, int ret),
62250 + TP_PROTO(int irq, const struct irqaction *action, int ret),
62251
62252 TP_ARGS(irq, action, ret),
62253
62254 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
62255 index c41f308..6918de3 100644
62256 --- a/include/video/udlfb.h
62257 +++ b/include/video/udlfb.h
62258 @@ -52,10 +52,10 @@ struct dlfb_data {
62259 u32 pseudo_palette[256];
62260 int blank_mode; /*one of FB_BLANK_ */
62261 /* blit-only rendering path metrics, exposed through sysfs */
62262 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
62263 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
62264 - atomic_t bytes_sent; /* to usb, after compression including overhead */
62265 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
62266 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
62267 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
62268 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
62269 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
62270 };
62271
62272 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
62273 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
62274 index 0993a22..32ba2fe 100644
62275 --- a/include/video/uvesafb.h
62276 +++ b/include/video/uvesafb.h
62277 @@ -177,6 +177,7 @@ struct uvesafb_par {
62278 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
62279 u8 pmi_setpal; /* PMI for palette changes */
62280 u16 *pmi_base; /* protected mode interface location */
62281 + u8 *pmi_code; /* protected mode code location */
62282 void *pmi_start;
62283 void *pmi_pal;
62284 u8 *vbe_state_orig; /*
62285 diff --git a/init/Kconfig b/init/Kconfig
62286 index 43298f9..2f56c12 100644
62287 --- a/init/Kconfig
62288 +++ b/init/Kconfig
62289 @@ -1214,7 +1214,7 @@ config SLUB_DEBUG
62290
62291 config COMPAT_BRK
62292 bool "Disable heap randomization"
62293 - default y
62294 + default n
62295 help
62296 Randomizing heap placement makes heap exploits harder, but it
62297 also breaks ancient binaries (including anything libc5 based).
62298 diff --git a/init/do_mounts.c b/init/do_mounts.c
62299 index db6e5ee..7677ff7 100644
62300 --- a/init/do_mounts.c
62301 +++ b/init/do_mounts.c
62302 @@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
62303
62304 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
62305 {
62306 - int err = sys_mount(name, "/root", fs, flags, data);
62307 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
62308 if (err)
62309 return err;
62310
62311 - sys_chdir((const char __user __force *)"/root");
62312 + sys_chdir((const char __force_user*)"/root");
62313 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
62314 printk(KERN_INFO
62315 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
62316 @@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
62317 va_start(args, fmt);
62318 vsprintf(buf, fmt, args);
62319 va_end(args);
62320 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
62321 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
62322 if (fd >= 0) {
62323 sys_ioctl(fd, FDEJECT, 0);
62324 sys_close(fd);
62325 }
62326 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
62327 - fd = sys_open("/dev/console", O_RDWR, 0);
62328 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
62329 if (fd >= 0) {
62330 sys_ioctl(fd, TCGETS, (long)&termios);
62331 termios.c_lflag &= ~ICANON;
62332 sys_ioctl(fd, TCSETSF, (long)&termios);
62333 - sys_read(fd, &c, 1);
62334 + sys_read(fd, (char __user *)&c, 1);
62335 termios.c_lflag |= ICANON;
62336 sys_ioctl(fd, TCSETSF, (long)&termios);
62337 sys_close(fd);
62338 @@ -553,6 +553,6 @@ void __init prepare_namespace(void)
62339 mount_root();
62340 out:
62341 devtmpfs_mount("dev");
62342 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
62343 - sys_chroot((const char __user __force *)".");
62344 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62345 + sys_chroot((const char __force_user *)".");
62346 }
62347 diff --git a/init/do_mounts.h b/init/do_mounts.h
62348 index f5b978a..69dbfe8 100644
62349 --- a/init/do_mounts.h
62350 +++ b/init/do_mounts.h
62351 @@ -15,15 +15,15 @@ extern int root_mountflags;
62352
62353 static inline int create_dev(char *name, dev_t dev)
62354 {
62355 - sys_unlink(name);
62356 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
62357 + sys_unlink((char __force_user *)name);
62358 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
62359 }
62360
62361 #if BITS_PER_LONG == 32
62362 static inline u32 bstat(char *name)
62363 {
62364 struct stat64 stat;
62365 - if (sys_stat64(name, &stat) != 0)
62366 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
62367 return 0;
62368 if (!S_ISBLK(stat.st_mode))
62369 return 0;
62370 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
62371 static inline u32 bstat(char *name)
62372 {
62373 struct stat stat;
62374 - if (sys_newstat(name, &stat) != 0)
62375 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
62376 return 0;
62377 if (!S_ISBLK(stat.st_mode))
62378 return 0;
62379 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
62380 index 3098a38..253064e 100644
62381 --- a/init/do_mounts_initrd.c
62382 +++ b/init/do_mounts_initrd.c
62383 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
62384 create_dev("/dev/root.old", Root_RAM0);
62385 /* mount initrd on rootfs' /root */
62386 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
62387 - sys_mkdir("/old", 0700);
62388 - root_fd = sys_open("/", 0, 0);
62389 - old_fd = sys_open("/old", 0, 0);
62390 + sys_mkdir((const char __force_user *)"/old", 0700);
62391 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
62392 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
62393 /* move initrd over / and chdir/chroot in initrd root */
62394 - sys_chdir("/root");
62395 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
62396 - sys_chroot(".");
62397 + sys_chdir((const char __force_user *)"/root");
62398 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62399 + sys_chroot((const char __force_user *)".");
62400
62401 /*
62402 * In case that a resume from disk is carried out by linuxrc or one of
62403 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
62404
62405 /* move initrd to rootfs' /old */
62406 sys_fchdir(old_fd);
62407 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
62408 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
62409 /* switch root and cwd back to / of rootfs */
62410 sys_fchdir(root_fd);
62411 - sys_chroot(".");
62412 + sys_chroot((const char __force_user *)".");
62413 sys_close(old_fd);
62414 sys_close(root_fd);
62415
62416 if (new_decode_dev(real_root_dev) == Root_RAM0) {
62417 - sys_chdir("/old");
62418 + sys_chdir((const char __force_user *)"/old");
62419 return;
62420 }
62421
62422 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
62423 mount_root();
62424
62425 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
62426 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
62427 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
62428 if (!error)
62429 printk("okay\n");
62430 else {
62431 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
62432 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
62433 if (error == -ENOENT)
62434 printk("/initrd does not exist. Ignored.\n");
62435 else
62436 printk("failed\n");
62437 printk(KERN_NOTICE "Unmounting old root\n");
62438 - sys_umount("/old", MNT_DETACH);
62439 + sys_umount((char __force_user *)"/old", MNT_DETACH);
62440 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
62441 if (fd < 0) {
62442 error = fd;
62443 @@ -116,11 +116,11 @@ int __init initrd_load(void)
62444 * mounted in the normal path.
62445 */
62446 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62447 - sys_unlink("/initrd.image");
62448 + sys_unlink((const char __force_user *)"/initrd.image");
62449 handle_initrd();
62450 return 1;
62451 }
62452 }
62453 - sys_unlink("/initrd.image");
62454 + sys_unlink((const char __force_user *)"/initrd.image");
62455 return 0;
62456 }
62457 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
62458 index 32c4799..c27ee74 100644
62459 --- a/init/do_mounts_md.c
62460 +++ b/init/do_mounts_md.c
62461 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62462 partitioned ? "_d" : "", minor,
62463 md_setup_args[ent].device_names);
62464
62465 - fd = sys_open(name, 0, 0);
62466 + fd = sys_open((char __force_user *)name, 0, 0);
62467 if (fd < 0) {
62468 printk(KERN_ERR "md: open failed - cannot start "
62469 "array %s\n", name);
62470 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62471 * array without it
62472 */
62473 sys_close(fd);
62474 - fd = sys_open(name, 0, 0);
62475 + fd = sys_open((char __force_user *)name, 0, 0);
62476 sys_ioctl(fd, BLKRRPART, 0);
62477 }
62478 sys_close(fd);
62479 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62480
62481 wait_for_device_probe();
62482
62483 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
62484 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
62485 if (fd >= 0) {
62486 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62487 sys_close(fd);
62488 diff --git a/init/initramfs.c b/init/initramfs.c
62489 index 2531811..040d4d4 100644
62490 --- a/init/initramfs.c
62491 +++ b/init/initramfs.c
62492 @@ -74,7 +74,7 @@ static void __init free_hash(void)
62493 }
62494 }
62495
62496 -static long __init do_utime(char __user *filename, time_t mtime)
62497 +static long __init do_utime(__force char __user *filename, time_t mtime)
62498 {
62499 struct timespec t[2];
62500
62501 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
62502 struct dir_entry *de, *tmp;
62503 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62504 list_del(&de->list);
62505 - do_utime(de->name, de->mtime);
62506 + do_utime((char __force_user *)de->name, de->mtime);
62507 kfree(de->name);
62508 kfree(de);
62509 }
62510 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
62511 if (nlink >= 2) {
62512 char *old = find_link(major, minor, ino, mode, collected);
62513 if (old)
62514 - return (sys_link(old, collected) < 0) ? -1 : 1;
62515 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
62516 }
62517 return 0;
62518 }
62519 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
62520 {
62521 struct stat st;
62522
62523 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62524 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
62525 if (S_ISDIR(st.st_mode))
62526 - sys_rmdir(path);
62527 + sys_rmdir((char __force_user *)path);
62528 else
62529 - sys_unlink(path);
62530 + sys_unlink((char __force_user *)path);
62531 }
62532 }
62533
62534 @@ -305,7 +305,7 @@ static int __init do_name(void)
62535 int openflags = O_WRONLY|O_CREAT;
62536 if (ml != 1)
62537 openflags |= O_TRUNC;
62538 - wfd = sys_open(collected, openflags, mode);
62539 + wfd = sys_open((char __force_user *)collected, openflags, mode);
62540
62541 if (wfd >= 0) {
62542 sys_fchown(wfd, uid, gid);
62543 @@ -317,17 +317,17 @@ static int __init do_name(void)
62544 }
62545 }
62546 } else if (S_ISDIR(mode)) {
62547 - sys_mkdir(collected, mode);
62548 - sys_chown(collected, uid, gid);
62549 - sys_chmod(collected, mode);
62550 + sys_mkdir((char __force_user *)collected, mode);
62551 + sys_chown((char __force_user *)collected, uid, gid);
62552 + sys_chmod((char __force_user *)collected, mode);
62553 dir_add(collected, mtime);
62554 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62555 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62556 if (maybe_link() == 0) {
62557 - sys_mknod(collected, mode, rdev);
62558 - sys_chown(collected, uid, gid);
62559 - sys_chmod(collected, mode);
62560 - do_utime(collected, mtime);
62561 + sys_mknod((char __force_user *)collected, mode, rdev);
62562 + sys_chown((char __force_user *)collected, uid, gid);
62563 + sys_chmod((char __force_user *)collected, mode);
62564 + do_utime((char __force_user *)collected, mtime);
62565 }
62566 }
62567 return 0;
62568 @@ -336,15 +336,15 @@ static int __init do_name(void)
62569 static int __init do_copy(void)
62570 {
62571 if (count >= body_len) {
62572 - sys_write(wfd, victim, body_len);
62573 + sys_write(wfd, (char __force_user *)victim, body_len);
62574 sys_close(wfd);
62575 - do_utime(vcollected, mtime);
62576 + do_utime((char __force_user *)vcollected, mtime);
62577 kfree(vcollected);
62578 eat(body_len);
62579 state = SkipIt;
62580 return 0;
62581 } else {
62582 - sys_write(wfd, victim, count);
62583 + sys_write(wfd, (char __force_user *)victim, count);
62584 body_len -= count;
62585 eat(count);
62586 return 1;
62587 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
62588 {
62589 collected[N_ALIGN(name_len) + body_len] = '\0';
62590 clean_path(collected, 0);
62591 - sys_symlink(collected + N_ALIGN(name_len), collected);
62592 - sys_lchown(collected, uid, gid);
62593 - do_utime(collected, mtime);
62594 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
62595 + sys_lchown((char __force_user *)collected, uid, gid);
62596 + do_utime((char __force_user *)collected, mtime);
62597 state = SkipIt;
62598 next_state = Reset;
62599 return 0;
62600 diff --git a/init/main.c b/init/main.c
62601 index 217ed23..ec5406f 100644
62602 --- a/init/main.c
62603 +++ b/init/main.c
62604 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
62605 extern void tc_init(void);
62606 #endif
62607
62608 +extern void grsecurity_init(void);
62609 +
62610 /*
62611 * Debug helper: via this flag we know that we are in 'early bootup code'
62612 * where only the boot processor is running with IRQ disabled. This means
62613 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
62614
62615 __setup("reset_devices", set_reset_devices);
62616
62617 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62618 +extern char pax_enter_kernel_user[];
62619 +extern char pax_exit_kernel_user[];
62620 +extern pgdval_t clone_pgd_mask;
62621 +#endif
62622 +
62623 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62624 +static int __init setup_pax_nouderef(char *str)
62625 +{
62626 +#ifdef CONFIG_X86_32
62627 + unsigned int cpu;
62628 + struct desc_struct *gdt;
62629 +
62630 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
62631 + gdt = get_cpu_gdt_table(cpu);
62632 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62633 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62634 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62635 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62636 + }
62637 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62638 +#else
62639 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62640 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62641 + clone_pgd_mask = ~(pgdval_t)0UL;
62642 +#endif
62643 +
62644 + return 0;
62645 +}
62646 +early_param("pax_nouderef", setup_pax_nouderef);
62647 +#endif
62648 +
62649 +#ifdef CONFIG_PAX_SOFTMODE
62650 +int pax_softmode;
62651 +
62652 +static int __init setup_pax_softmode(char *str)
62653 +{
62654 + get_option(&str, &pax_softmode);
62655 + return 1;
62656 +}
62657 +__setup("pax_softmode=", setup_pax_softmode);
62658 +#endif
62659 +
62660 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62661 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62662 static const char *panic_later, *panic_param;
62663 @@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
62664 {
62665 int count = preempt_count();
62666 int ret;
62667 + const char *msg1 = "", *msg2 = "";
62668
62669 if (initcall_debug)
62670 ret = do_one_initcall_debug(fn);
62671 @@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
62672 sprintf(msgbuf, "error code %d ", ret);
62673
62674 if (preempt_count() != count) {
62675 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62676 + msg1 = " preemption imbalance";
62677 preempt_count() = count;
62678 }
62679 if (irqs_disabled()) {
62680 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62681 + msg2 = " disabled interrupts";
62682 local_irq_enable();
62683 }
62684 - if (msgbuf[0]) {
62685 - printk("initcall %pF returned with %s\n", fn, msgbuf);
62686 + if (msgbuf[0] || *msg1 || *msg2) {
62687 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62688 }
62689
62690 return ret;
62691 @@ -820,7 +866,7 @@ static int __init kernel_init(void * unused)
62692 do_basic_setup();
62693
62694 /* Open the /dev/console on the rootfs, this should never fail */
62695 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62696 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62697 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62698
62699 (void) sys_dup(0);
62700 @@ -833,11 +879,13 @@ static int __init kernel_init(void * unused)
62701 if (!ramdisk_execute_command)
62702 ramdisk_execute_command = "/init";
62703
62704 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62705 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62706 ramdisk_execute_command = NULL;
62707 prepare_namespace();
62708 }
62709
62710 + grsecurity_init();
62711 +
62712 /*
62713 * Ok, we have completed the initial bootup, and
62714 * we're essentially up and running. Get rid of the
62715 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
62716 index 5b4293d..f179875 100644
62717 --- a/ipc/mqueue.c
62718 +++ b/ipc/mqueue.c
62719 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
62720 mq_bytes = (mq_msg_tblsz +
62721 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62722
62723 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62724 spin_lock(&mq_lock);
62725 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62726 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62727 diff --git a/ipc/msg.c b/ipc/msg.c
62728 index 7385de2..a8180e0 100644
62729 --- a/ipc/msg.c
62730 +++ b/ipc/msg.c
62731 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
62732 return security_msg_queue_associate(msq, msgflg);
62733 }
62734
62735 +static struct ipc_ops msg_ops = {
62736 + .getnew = newque,
62737 + .associate = msg_security,
62738 + .more_checks = NULL
62739 +};
62740 +
62741 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62742 {
62743 struct ipc_namespace *ns;
62744 - struct ipc_ops msg_ops;
62745 struct ipc_params msg_params;
62746
62747 ns = current->nsproxy->ipc_ns;
62748
62749 - msg_ops.getnew = newque;
62750 - msg_ops.associate = msg_security;
62751 - msg_ops.more_checks = NULL;
62752 -
62753 msg_params.key = key;
62754 msg_params.flg = msgflg;
62755
62756 diff --git a/ipc/sem.c b/ipc/sem.c
62757 index 5215a81..cfc0cac 100644
62758 --- a/ipc/sem.c
62759 +++ b/ipc/sem.c
62760 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
62761 return 0;
62762 }
62763
62764 +static struct ipc_ops sem_ops = {
62765 + .getnew = newary,
62766 + .associate = sem_security,
62767 + .more_checks = sem_more_checks
62768 +};
62769 +
62770 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62771 {
62772 struct ipc_namespace *ns;
62773 - struct ipc_ops sem_ops;
62774 struct ipc_params sem_params;
62775
62776 ns = current->nsproxy->ipc_ns;
62777 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62778 if (nsems < 0 || nsems > ns->sc_semmsl)
62779 return -EINVAL;
62780
62781 - sem_ops.getnew = newary;
62782 - sem_ops.associate = sem_security;
62783 - sem_ops.more_checks = sem_more_checks;
62784 -
62785 sem_params.key = key;
62786 sem_params.flg = semflg;
62787 sem_params.u.nsems = nsems;
62788 diff --git a/ipc/shm.c b/ipc/shm.c
62789 index b76be5b..859e750 100644
62790 --- a/ipc/shm.c
62791 +++ b/ipc/shm.c
62792 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
62793 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62794 #endif
62795
62796 +#ifdef CONFIG_GRKERNSEC
62797 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62798 + const time_t shm_createtime, const uid_t cuid,
62799 + const int shmid);
62800 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62801 + const time_t shm_createtime);
62802 +#endif
62803 +
62804 void shm_init_ns(struct ipc_namespace *ns)
62805 {
62806 ns->shm_ctlmax = SHMMAX;
62807 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
62808 shp->shm_lprid = 0;
62809 shp->shm_atim = shp->shm_dtim = 0;
62810 shp->shm_ctim = get_seconds();
62811 +#ifdef CONFIG_GRKERNSEC
62812 + {
62813 + struct timespec timeval;
62814 + do_posix_clock_monotonic_gettime(&timeval);
62815 +
62816 + shp->shm_createtime = timeval.tv_sec;
62817 + }
62818 +#endif
62819 shp->shm_segsz = size;
62820 shp->shm_nattch = 0;
62821 shp->shm_file = file;
62822 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
62823 return 0;
62824 }
62825
62826 +static struct ipc_ops shm_ops = {
62827 + .getnew = newseg,
62828 + .associate = shm_security,
62829 + .more_checks = shm_more_checks
62830 +};
62831 +
62832 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62833 {
62834 struct ipc_namespace *ns;
62835 - struct ipc_ops shm_ops;
62836 struct ipc_params shm_params;
62837
62838 ns = current->nsproxy->ipc_ns;
62839
62840 - shm_ops.getnew = newseg;
62841 - shm_ops.associate = shm_security;
62842 - shm_ops.more_checks = shm_more_checks;
62843 -
62844 shm_params.key = key;
62845 shm_params.flg = shmflg;
62846 shm_params.u.size = size;
62847 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62848 f_mode = FMODE_READ | FMODE_WRITE;
62849 }
62850 if (shmflg & SHM_EXEC) {
62851 +
62852 +#ifdef CONFIG_PAX_MPROTECT
62853 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
62854 + goto out;
62855 +#endif
62856 +
62857 prot |= PROT_EXEC;
62858 acc_mode |= S_IXUGO;
62859 }
62860 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62861 if (err)
62862 goto out_unlock;
62863
62864 +#ifdef CONFIG_GRKERNSEC
62865 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62866 + shp->shm_perm.cuid, shmid) ||
62867 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62868 + err = -EACCES;
62869 + goto out_unlock;
62870 + }
62871 +#endif
62872 +
62873 path = shp->shm_file->f_path;
62874 path_get(&path);
62875 shp->shm_nattch++;
62876 +#ifdef CONFIG_GRKERNSEC
62877 + shp->shm_lapid = current->pid;
62878 +#endif
62879 size = i_size_read(path.dentry->d_inode);
62880 shm_unlock(shp);
62881
62882 diff --git a/kernel/acct.c b/kernel/acct.c
62883 index fa7eb3d..7faf116 100644
62884 --- a/kernel/acct.c
62885 +++ b/kernel/acct.c
62886 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
62887 */
62888 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62889 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62890 - file->f_op->write(file, (char *)&ac,
62891 + file->f_op->write(file, (char __force_user *)&ac,
62892 sizeof(acct_t), &file->f_pos);
62893 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62894 set_fs(fs);
62895 diff --git a/kernel/audit.c b/kernel/audit.c
62896 index 09fae26..ed71d5b 100644
62897 --- a/kernel/audit.c
62898 +++ b/kernel/audit.c
62899 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62900 3) suppressed due to audit_rate_limit
62901 4) suppressed due to audit_backlog_limit
62902 */
62903 -static atomic_t audit_lost = ATOMIC_INIT(0);
62904 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62905
62906 /* The netlink socket. */
62907 static struct sock *audit_sock;
62908 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62909 unsigned long now;
62910 int print;
62911
62912 - atomic_inc(&audit_lost);
62913 + atomic_inc_unchecked(&audit_lost);
62914
62915 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62916
62917 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62918 printk(KERN_WARNING
62919 "audit: audit_lost=%d audit_rate_limit=%d "
62920 "audit_backlog_limit=%d\n",
62921 - atomic_read(&audit_lost),
62922 + atomic_read_unchecked(&audit_lost),
62923 audit_rate_limit,
62924 audit_backlog_limit);
62925 audit_panic(message);
62926 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
62927 status_set.pid = audit_pid;
62928 status_set.rate_limit = audit_rate_limit;
62929 status_set.backlog_limit = audit_backlog_limit;
62930 - status_set.lost = atomic_read(&audit_lost);
62931 + status_set.lost = atomic_read_unchecked(&audit_lost);
62932 status_set.backlog = skb_queue_len(&audit_skb_queue);
62933 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62934 &status_set, sizeof(status_set));
62935 @@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
62936 avail = audit_expand(ab,
62937 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
62938 if (!avail)
62939 - goto out;
62940 + goto out_va_end;
62941 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
62942 }
62943 - va_end(args2);
62944 if (len > 0)
62945 skb_put(skb, len);
62946 +out_va_end:
62947 + va_end(args2);
62948 out:
62949 return;
62950 }
62951 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
62952 index 47b7fc1..c003c33 100644
62953 --- a/kernel/auditsc.c
62954 +++ b/kernel/auditsc.c
62955 @@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
62956 struct audit_buffer **ab,
62957 struct audit_aux_data_execve *axi)
62958 {
62959 - int i;
62960 - size_t len, len_sent = 0;
62961 + int i, len;
62962 + size_t len_sent = 0;
62963 const char __user *p;
62964 char *buf;
62965
62966 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
62967 }
62968
62969 /* global counter which is incremented every time something logs in */
62970 -static atomic_t session_id = ATOMIC_INIT(0);
62971 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62972
62973 /**
62974 * audit_set_loginuid - set a task's audit_context loginuid
62975 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
62976 */
62977 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62978 {
62979 - unsigned int sessionid = atomic_inc_return(&session_id);
62980 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62981 struct audit_context *context = task->audit_context;
62982
62983 if (context && context->in_syscall) {
62984 diff --git a/kernel/capability.c b/kernel/capability.c
62985 index b463871..fa3ea1f 100644
62986 --- a/kernel/capability.c
62987 +++ b/kernel/capability.c
62988 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
62989 * before modification is attempted and the application
62990 * fails.
62991 */
62992 + if (tocopy > ARRAY_SIZE(kdata))
62993 + return -EFAULT;
62994 +
62995 if (copy_to_user(dataptr, kdata, tocopy
62996 * sizeof(struct __user_cap_data_struct))) {
62997 return -EFAULT;
62998 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
62999 BUG();
63000 }
63001
63002 - if (security_capable(ns, current_cred(), cap) == 0) {
63003 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
63004 current->flags |= PF_SUPERPRIV;
63005 return true;
63006 }
63007 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
63008 }
63009 EXPORT_SYMBOL(ns_capable);
63010
63011 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
63012 +{
63013 + if (unlikely(!cap_valid(cap))) {
63014 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
63015 + BUG();
63016 + }
63017 +
63018 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
63019 + current->flags |= PF_SUPERPRIV;
63020 + return true;
63021 + }
63022 + return false;
63023 +}
63024 +EXPORT_SYMBOL(ns_capable_nolog);
63025 +
63026 +bool capable_nolog(int cap)
63027 +{
63028 + return ns_capable_nolog(&init_user_ns, cap);
63029 +}
63030 +EXPORT_SYMBOL(capable_nolog);
63031 +
63032 /**
63033 * task_ns_capable - Determine whether current task has a superior
63034 * capability targeted at a specific task's user namespace.
63035 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
63036 }
63037 EXPORT_SYMBOL(task_ns_capable);
63038
63039 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
63040 +{
63041 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
63042 +}
63043 +EXPORT_SYMBOL(task_ns_capable_nolog);
63044 +
63045 /**
63046 * nsown_capable - Check superior capability to one's own user_ns
63047 * @cap: The capability in question
63048 diff --git a/kernel/compat.c b/kernel/compat.c
63049 index f346ced..aa2b1f4 100644
63050 --- a/kernel/compat.c
63051 +++ b/kernel/compat.c
63052 @@ -13,6 +13,7 @@
63053
63054 #include <linux/linkage.h>
63055 #include <linux/compat.h>
63056 +#include <linux/module.h>
63057 #include <linux/errno.h>
63058 #include <linux/time.h>
63059 #include <linux/signal.h>
63060 @@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
63061 mm_segment_t oldfs;
63062 long ret;
63063
63064 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
63065 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
63066 oldfs = get_fs();
63067 set_fs(KERNEL_DS);
63068 ret = hrtimer_nanosleep_restart(restart);
63069 @@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
63070 oldfs = get_fs();
63071 set_fs(KERNEL_DS);
63072 ret = hrtimer_nanosleep(&tu,
63073 - rmtp ? (struct timespec __user *)&rmt : NULL,
63074 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
63075 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
63076 set_fs(oldfs);
63077
63078 @@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
63079 mm_segment_t old_fs = get_fs();
63080
63081 set_fs(KERNEL_DS);
63082 - ret = sys_sigpending((old_sigset_t __user *) &s);
63083 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
63084 set_fs(old_fs);
63085 if (ret == 0)
63086 ret = put_user(s, set);
63087 @@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
63088 old_fs = get_fs();
63089 set_fs(KERNEL_DS);
63090 ret = sys_sigprocmask(how,
63091 - set ? (old_sigset_t __user *) &s : NULL,
63092 - oset ? (old_sigset_t __user *) &s : NULL);
63093 + set ? (old_sigset_t __force_user *) &s : NULL,
63094 + oset ? (old_sigset_t __force_user *) &s : NULL);
63095 set_fs(old_fs);
63096 if (ret == 0)
63097 if (oset)
63098 @@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
63099 mm_segment_t old_fs = get_fs();
63100
63101 set_fs(KERNEL_DS);
63102 - ret = sys_old_getrlimit(resource, &r);
63103 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
63104 set_fs(old_fs);
63105
63106 if (!ret) {
63107 @@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
63108 mm_segment_t old_fs = get_fs();
63109
63110 set_fs(KERNEL_DS);
63111 - ret = sys_getrusage(who, (struct rusage __user *) &r);
63112 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
63113 set_fs(old_fs);
63114
63115 if (ret)
63116 @@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
63117 set_fs (KERNEL_DS);
63118 ret = sys_wait4(pid,
63119 (stat_addr ?
63120 - (unsigned int __user *) &status : NULL),
63121 - options, (struct rusage __user *) &r);
63122 + (unsigned int __force_user *) &status : NULL),
63123 + options, (struct rusage __force_user *) &r);
63124 set_fs (old_fs);
63125
63126 if (ret > 0) {
63127 @@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
63128 memset(&info, 0, sizeof(info));
63129
63130 set_fs(KERNEL_DS);
63131 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
63132 - uru ? (struct rusage __user *)&ru : NULL);
63133 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
63134 + uru ? (struct rusage __force_user *)&ru : NULL);
63135 set_fs(old_fs);
63136
63137 if ((ret < 0) || (info.si_signo == 0))
63138 @@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
63139 oldfs = get_fs();
63140 set_fs(KERNEL_DS);
63141 err = sys_timer_settime(timer_id, flags,
63142 - (struct itimerspec __user *) &newts,
63143 - (struct itimerspec __user *) &oldts);
63144 + (struct itimerspec __force_user *) &newts,
63145 + (struct itimerspec __force_user *) &oldts);
63146 set_fs(oldfs);
63147 if (!err && old && put_compat_itimerspec(old, &oldts))
63148 return -EFAULT;
63149 @@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
63150 oldfs = get_fs();
63151 set_fs(KERNEL_DS);
63152 err = sys_timer_gettime(timer_id,
63153 - (struct itimerspec __user *) &ts);
63154 + (struct itimerspec __force_user *) &ts);
63155 set_fs(oldfs);
63156 if (!err && put_compat_itimerspec(setting, &ts))
63157 return -EFAULT;
63158 @@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
63159 oldfs = get_fs();
63160 set_fs(KERNEL_DS);
63161 err = sys_clock_settime(which_clock,
63162 - (struct timespec __user *) &ts);
63163 + (struct timespec __force_user *) &ts);
63164 set_fs(oldfs);
63165 return err;
63166 }
63167 @@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
63168 oldfs = get_fs();
63169 set_fs(KERNEL_DS);
63170 err = sys_clock_gettime(which_clock,
63171 - (struct timespec __user *) &ts);
63172 + (struct timespec __force_user *) &ts);
63173 set_fs(oldfs);
63174 if (!err && put_compat_timespec(&ts, tp))
63175 return -EFAULT;
63176 @@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
63177
63178 oldfs = get_fs();
63179 set_fs(KERNEL_DS);
63180 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
63181 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
63182 set_fs(oldfs);
63183
63184 err = compat_put_timex(utp, &txc);
63185 @@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
63186 oldfs = get_fs();
63187 set_fs(KERNEL_DS);
63188 err = sys_clock_getres(which_clock,
63189 - (struct timespec __user *) &ts);
63190 + (struct timespec __force_user *) &ts);
63191 set_fs(oldfs);
63192 if (!err && tp && put_compat_timespec(&ts, tp))
63193 return -EFAULT;
63194 @@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
63195 long err;
63196 mm_segment_t oldfs;
63197 struct timespec tu;
63198 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
63199 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
63200
63201 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
63202 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
63203 oldfs = get_fs();
63204 set_fs(KERNEL_DS);
63205 err = clock_nanosleep_restart(restart);
63206 @@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
63207 oldfs = get_fs();
63208 set_fs(KERNEL_DS);
63209 err = sys_clock_nanosleep(which_clock, flags,
63210 - (struct timespec __user *) &in,
63211 - (struct timespec __user *) &out);
63212 + (struct timespec __force_user *) &in,
63213 + (struct timespec __force_user *) &out);
63214 set_fs(oldfs);
63215
63216 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
63217 diff --git a/kernel/configs.c b/kernel/configs.c
63218 index 42e8fa0..9e7406b 100644
63219 --- a/kernel/configs.c
63220 +++ b/kernel/configs.c
63221 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
63222 struct proc_dir_entry *entry;
63223
63224 /* create the current config file */
63225 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
63226 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
63227 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
63228 + &ikconfig_file_ops);
63229 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63230 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
63231 + &ikconfig_file_ops);
63232 +#endif
63233 +#else
63234 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
63235 &ikconfig_file_ops);
63236 +#endif
63237 +
63238 if (!entry)
63239 return -ENOMEM;
63240
63241 diff --git a/kernel/cred.c b/kernel/cred.c
63242 index 5791612..a3c04dc 100644
63243 --- a/kernel/cred.c
63244 +++ b/kernel/cred.c
63245 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
63246 validate_creds(cred);
63247 put_cred(cred);
63248 }
63249 +
63250 +#ifdef CONFIG_GRKERNSEC_SETXID
63251 + cred = (struct cred *) tsk->delayed_cred;
63252 + if (cred) {
63253 + tsk->delayed_cred = NULL;
63254 + validate_creds(cred);
63255 + put_cred(cred);
63256 + }
63257 +#endif
63258 }
63259
63260 /**
63261 @@ -470,7 +479,7 @@ error_put:
63262 * Always returns 0 thus allowing this function to be tail-called at the end
63263 * of, say, sys_setgid().
63264 */
63265 -int commit_creds(struct cred *new)
63266 +static int __commit_creds(struct cred *new)
63267 {
63268 struct task_struct *task = current;
63269 const struct cred *old = task->real_cred;
63270 @@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
63271
63272 get_cred(new); /* we will require a ref for the subj creds too */
63273
63274 + gr_set_role_label(task, new->uid, new->gid);
63275 +
63276 /* dumpability changes */
63277 if (old->euid != new->euid ||
63278 old->egid != new->egid ||
63279 @@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
63280 put_cred(old);
63281 return 0;
63282 }
63283 +#ifdef CONFIG_GRKERNSEC_SETXID
63284 +extern int set_user(struct cred *new);
63285 +
63286 +void gr_delayed_cred_worker(void)
63287 +{
63288 + const struct cred *new = current->delayed_cred;
63289 + struct cred *ncred;
63290 +
63291 + current->delayed_cred = NULL;
63292 +
63293 + if (current_uid() && new != NULL) {
63294 + // from doing get_cred on it when queueing this
63295 + put_cred(new);
63296 + return;
63297 + } else if (new == NULL)
63298 + return;
63299 +
63300 + ncred = prepare_creds();
63301 + if (!ncred)
63302 + goto die;
63303 + // uids
63304 + ncred->uid = new->uid;
63305 + ncred->euid = new->euid;
63306 + ncred->suid = new->suid;
63307 + ncred->fsuid = new->fsuid;
63308 + // gids
63309 + ncred->gid = new->gid;
63310 + ncred->egid = new->egid;
63311 + ncred->sgid = new->sgid;
63312 + ncred->fsgid = new->fsgid;
63313 + // groups
63314 + if (set_groups(ncred, new->group_info) < 0) {
63315 + abort_creds(ncred);
63316 + goto die;
63317 + }
63318 + // caps
63319 + ncred->securebits = new->securebits;
63320 + ncred->cap_inheritable = new->cap_inheritable;
63321 + ncred->cap_permitted = new->cap_permitted;
63322 + ncred->cap_effective = new->cap_effective;
63323 + ncred->cap_bset = new->cap_bset;
63324 +
63325 + if (set_user(ncred)) {
63326 + abort_creds(ncred);
63327 + goto die;
63328 + }
63329 +
63330 + // from doing get_cred on it when queueing this
63331 + put_cred(new);
63332 +
63333 + __commit_creds(ncred);
63334 + return;
63335 +die:
63336 + // from doing get_cred on it when queueing this
63337 + put_cred(new);
63338 + do_group_exit(SIGKILL);
63339 +}
63340 +#endif
63341 +
63342 +int commit_creds(struct cred *new)
63343 +{
63344 +#ifdef CONFIG_GRKERNSEC_SETXID
63345 + struct task_struct *t;
63346 +
63347 + /* we won't get called with tasklist_lock held for writing
63348 + and interrupts disabled as the cred struct in that case is
63349 + init_cred
63350 + */
63351 + if (grsec_enable_setxid && !current_is_single_threaded() &&
63352 + !current_uid() && new->uid) {
63353 + rcu_read_lock();
63354 + read_lock(&tasklist_lock);
63355 + for (t = next_thread(current); t != current;
63356 + t = next_thread(t)) {
63357 + if (t->delayed_cred == NULL) {
63358 + t->delayed_cred = get_cred(new);
63359 + set_tsk_need_resched(t);
63360 + }
63361 + }
63362 + read_unlock(&tasklist_lock);
63363 + rcu_read_unlock();
63364 + }
63365 +#endif
63366 + return __commit_creds(new);
63367 +}
63368 +
63369 EXPORT_SYMBOL(commit_creds);
63370
63371 /**
63372 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
63373 index 0d7c087..01b8cef 100644
63374 --- a/kernel/debug/debug_core.c
63375 +++ b/kernel/debug/debug_core.c
63376 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
63377 */
63378 static atomic_t masters_in_kgdb;
63379 static atomic_t slaves_in_kgdb;
63380 -static atomic_t kgdb_break_tasklet_var;
63381 +static atomic_unchecked_t kgdb_break_tasklet_var;
63382 atomic_t kgdb_setting_breakpoint;
63383
63384 struct task_struct *kgdb_usethread;
63385 @@ -129,7 +129,7 @@ int kgdb_single_step;
63386 static pid_t kgdb_sstep_pid;
63387
63388 /* to keep track of the CPU which is doing the single stepping*/
63389 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63390 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63391
63392 /*
63393 * If you are debugging a problem where roundup (the collection of
63394 @@ -542,7 +542,7 @@ return_normal:
63395 * kernel will only try for the value of sstep_tries before
63396 * giving up and continuing on.
63397 */
63398 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63399 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63400 (kgdb_info[cpu].task &&
63401 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
63402 atomic_set(&kgdb_active, -1);
63403 @@ -636,8 +636,8 @@ cpu_master_loop:
63404 }
63405
63406 kgdb_restore:
63407 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
63408 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
63409 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
63410 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
63411 if (kgdb_info[sstep_cpu].task)
63412 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
63413 else
63414 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
63415 static void kgdb_tasklet_bpt(unsigned long ing)
63416 {
63417 kgdb_breakpoint();
63418 - atomic_set(&kgdb_break_tasklet_var, 0);
63419 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
63420 }
63421
63422 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
63423
63424 void kgdb_schedule_breakpoint(void)
63425 {
63426 - if (atomic_read(&kgdb_break_tasklet_var) ||
63427 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
63428 atomic_read(&kgdb_active) != -1 ||
63429 atomic_read(&kgdb_setting_breakpoint))
63430 return;
63431 - atomic_inc(&kgdb_break_tasklet_var);
63432 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
63433 tasklet_schedule(&kgdb_tasklet_breakpoint);
63434 }
63435 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
63436 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
63437 index 63786e7..0780cac 100644
63438 --- a/kernel/debug/kdb/kdb_main.c
63439 +++ b/kernel/debug/kdb/kdb_main.c
63440 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
63441 list_for_each_entry(mod, kdb_modules, list) {
63442
63443 kdb_printf("%-20s%8u 0x%p ", mod->name,
63444 - mod->core_size, (void *)mod);
63445 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
63446 #ifdef CONFIG_MODULE_UNLOAD
63447 kdb_printf("%4d ", module_refcount(mod));
63448 #endif
63449 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
63450 kdb_printf(" (Loading)");
63451 else
63452 kdb_printf(" (Live)");
63453 - kdb_printf(" 0x%p", mod->module_core);
63454 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63455
63456 #ifdef CONFIG_MODULE_UNLOAD
63457 {
63458 diff --git a/kernel/events/core.c b/kernel/events/core.c
63459 index 58690af..d903d75 100644
63460 --- a/kernel/events/core.c
63461 +++ b/kernel/events/core.c
63462 @@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
63463 return 0;
63464 }
63465
63466 -static atomic64_t perf_event_id;
63467 +static atomic64_unchecked_t perf_event_id;
63468
63469 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
63470 enum event_type_t event_type);
63471 @@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info)
63472
63473 static inline u64 perf_event_count(struct perf_event *event)
63474 {
63475 - return local64_read(&event->count) + atomic64_read(&event->child_count);
63476 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
63477 }
63478
63479 static u64 perf_event_read(struct perf_event *event)
63480 @@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
63481 mutex_lock(&event->child_mutex);
63482 total += perf_event_read(event);
63483 *enabled += event->total_time_enabled +
63484 - atomic64_read(&event->child_total_time_enabled);
63485 + atomic64_read_unchecked(&event->child_total_time_enabled);
63486 *running += event->total_time_running +
63487 - atomic64_read(&event->child_total_time_running);
63488 + atomic64_read_unchecked(&event->child_total_time_running);
63489
63490 list_for_each_entry(child, &event->child_list, child_list) {
63491 total += perf_event_read(child);
63492 @@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
63493 userpg->offset -= local64_read(&event->hw.prev_count);
63494
63495 userpg->time_enabled = enabled +
63496 - atomic64_read(&event->child_total_time_enabled);
63497 + atomic64_read_unchecked(&event->child_total_time_enabled);
63498
63499 userpg->time_running = running +
63500 - atomic64_read(&event->child_total_time_running);
63501 + atomic64_read_unchecked(&event->child_total_time_running);
63502
63503 barrier();
63504 ++userpg->lock;
63505 @@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
63506 values[n++] = perf_event_count(event);
63507 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
63508 values[n++] = enabled +
63509 - atomic64_read(&event->child_total_time_enabled);
63510 + atomic64_read_unchecked(&event->child_total_time_enabled);
63511 }
63512 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
63513 values[n++] = running +
63514 - atomic64_read(&event->child_total_time_running);
63515 + atomic64_read_unchecked(&event->child_total_time_running);
63516 }
63517 if (read_format & PERF_FORMAT_ID)
63518 values[n++] = primary_event_id(event);
63519 @@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
63520 * need to add enough zero bytes after the string to handle
63521 * the 64bit alignment we do later.
63522 */
63523 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
63524 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
63525 if (!buf) {
63526 name = strncpy(tmp, "//enomem", sizeof(tmp));
63527 goto got_name;
63528 }
63529 - name = d_path(&file->f_path, buf, PATH_MAX);
63530 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
63531 if (IS_ERR(name)) {
63532 name = strncpy(tmp, "//toolong", sizeof(tmp));
63533 goto got_name;
63534 @@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
63535 event->parent = parent_event;
63536
63537 event->ns = get_pid_ns(current->nsproxy->pid_ns);
63538 - event->id = atomic64_inc_return(&perf_event_id);
63539 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
63540
63541 event->state = PERF_EVENT_STATE_INACTIVE;
63542
63543 @@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
63544 /*
63545 * Add back the child's count to the parent's count:
63546 */
63547 - atomic64_add(child_val, &parent_event->child_count);
63548 - atomic64_add(child_event->total_time_enabled,
63549 + atomic64_add_unchecked(child_val, &parent_event->child_count);
63550 + atomic64_add_unchecked(child_event->total_time_enabled,
63551 &parent_event->child_total_time_enabled);
63552 - atomic64_add(child_event->total_time_running,
63553 + atomic64_add_unchecked(child_event->total_time_running,
63554 &parent_event->child_total_time_running);
63555
63556 /*
63557 diff --git a/kernel/exit.c b/kernel/exit.c
63558 index e6e01b9..619f837 100644
63559 --- a/kernel/exit.c
63560 +++ b/kernel/exit.c
63561 @@ -57,6 +57,10 @@
63562 #include <asm/pgtable.h>
63563 #include <asm/mmu_context.h>
63564
63565 +#ifdef CONFIG_GRKERNSEC
63566 +extern rwlock_t grsec_exec_file_lock;
63567 +#endif
63568 +
63569 static void exit_mm(struct task_struct * tsk);
63570
63571 static void __unhash_process(struct task_struct *p, bool group_dead)
63572 @@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
63573 struct task_struct *leader;
63574 int zap_leader;
63575 repeat:
63576 +#ifdef CONFIG_NET
63577 + gr_del_task_from_ip_table(p);
63578 +#endif
63579 +
63580 /* don't need to get the RCU readlock here - the process is dead and
63581 * can't be modifying its own credentials. But shut RCU-lockdep up */
63582 rcu_read_lock();
63583 @@ -380,7 +388,7 @@ int allow_signal(int sig)
63584 * know it'll be handled, so that they don't get converted to
63585 * SIGKILL or just silently dropped.
63586 */
63587 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63588 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63589 recalc_sigpending();
63590 spin_unlock_irq(&current->sighand->siglock);
63591 return 0;
63592 @@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
63593 vsnprintf(current->comm, sizeof(current->comm), name, args);
63594 va_end(args);
63595
63596 +#ifdef CONFIG_GRKERNSEC
63597 + write_lock(&grsec_exec_file_lock);
63598 + if (current->exec_file) {
63599 + fput(current->exec_file);
63600 + current->exec_file = NULL;
63601 + }
63602 + write_unlock(&grsec_exec_file_lock);
63603 +#endif
63604 +
63605 + gr_set_kernel_label(current);
63606 +
63607 /*
63608 * If we were started as result of loading a module, close all of the
63609 * user space pages. We don't need them, and if we didn't close them
63610 @@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
63611 struct task_struct *tsk = current;
63612 int group_dead;
63613
63614 + set_fs(USER_DS);
63615 +
63616 profile_task_exit(tsk);
63617
63618 WARN_ON(blk_needs_flush_plug(tsk));
63619 @@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
63620 * mm_release()->clear_child_tid() from writing to a user-controlled
63621 * kernel address.
63622 */
63623 - set_fs(USER_DS);
63624
63625 ptrace_event(PTRACE_EVENT_EXIT, code);
63626
63627 @@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
63628 tsk->exit_code = code;
63629 taskstats_exit(tsk, group_dead);
63630
63631 + gr_acl_handle_psacct(tsk, code);
63632 + gr_acl_handle_exit();
63633 +
63634 exit_mm(tsk);
63635
63636 if (group_dead)
63637 diff --git a/kernel/fork.c b/kernel/fork.c
63638 index da4a6a1..0973380 100644
63639 --- a/kernel/fork.c
63640 +++ b/kernel/fork.c
63641 @@ -280,7 +280,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
63642 *stackend = STACK_END_MAGIC; /* for overflow detection */
63643
63644 #ifdef CONFIG_CC_STACKPROTECTOR
63645 - tsk->stack_canary = get_random_int();
63646 + tsk->stack_canary = pax_get_random_long();
63647 #endif
63648
63649 /*
63650 @@ -304,13 +304,77 @@ out:
63651 }
63652
63653 #ifdef CONFIG_MMU
63654 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63655 +{
63656 + struct vm_area_struct *tmp;
63657 + unsigned long charge;
63658 + struct mempolicy *pol;
63659 + struct file *file;
63660 +
63661 + charge = 0;
63662 + if (mpnt->vm_flags & VM_ACCOUNT) {
63663 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63664 + if (security_vm_enough_memory(len))
63665 + goto fail_nomem;
63666 + charge = len;
63667 + }
63668 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63669 + if (!tmp)
63670 + goto fail_nomem;
63671 + *tmp = *mpnt;
63672 + tmp->vm_mm = mm;
63673 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
63674 + pol = mpol_dup(vma_policy(mpnt));
63675 + if (IS_ERR(pol))
63676 + goto fail_nomem_policy;
63677 + vma_set_policy(tmp, pol);
63678 + if (anon_vma_fork(tmp, mpnt))
63679 + goto fail_nomem_anon_vma_fork;
63680 + tmp->vm_flags &= ~VM_LOCKED;
63681 + tmp->vm_next = tmp->vm_prev = NULL;
63682 + tmp->vm_mirror = NULL;
63683 + file = tmp->vm_file;
63684 + if (file) {
63685 + struct inode *inode = file->f_path.dentry->d_inode;
63686 + struct address_space *mapping = file->f_mapping;
63687 +
63688 + get_file(file);
63689 + if (tmp->vm_flags & VM_DENYWRITE)
63690 + atomic_dec(&inode->i_writecount);
63691 + mutex_lock(&mapping->i_mmap_mutex);
63692 + if (tmp->vm_flags & VM_SHARED)
63693 + mapping->i_mmap_writable++;
63694 + flush_dcache_mmap_lock(mapping);
63695 + /* insert tmp into the share list, just after mpnt */
63696 + vma_prio_tree_add(tmp, mpnt);
63697 + flush_dcache_mmap_unlock(mapping);
63698 + mutex_unlock(&mapping->i_mmap_mutex);
63699 + }
63700 +
63701 + /*
63702 + * Clear hugetlb-related page reserves for children. This only
63703 + * affects MAP_PRIVATE mappings. Faults generated by the child
63704 + * are not guaranteed to succeed, even if read-only
63705 + */
63706 + if (is_vm_hugetlb_page(tmp))
63707 + reset_vma_resv_huge_pages(tmp);
63708 +
63709 + return tmp;
63710 +
63711 +fail_nomem_anon_vma_fork:
63712 + mpol_put(pol);
63713 +fail_nomem_policy:
63714 + kmem_cache_free(vm_area_cachep, tmp);
63715 +fail_nomem:
63716 + vm_unacct_memory(charge);
63717 + return NULL;
63718 +}
63719 +
63720 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63721 {
63722 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63723 struct rb_node **rb_link, *rb_parent;
63724 int retval;
63725 - unsigned long charge;
63726 - struct mempolicy *pol;
63727
63728 down_write(&oldmm->mmap_sem);
63729 flush_cache_dup_mm(oldmm);
63730 @@ -322,8 +386,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63731 mm->locked_vm = 0;
63732 mm->mmap = NULL;
63733 mm->mmap_cache = NULL;
63734 - mm->free_area_cache = oldmm->mmap_base;
63735 - mm->cached_hole_size = ~0UL;
63736 + mm->free_area_cache = oldmm->free_area_cache;
63737 + mm->cached_hole_size = oldmm->cached_hole_size;
63738 mm->map_count = 0;
63739 cpumask_clear(mm_cpumask(mm));
63740 mm->mm_rb = RB_ROOT;
63741 @@ -339,8 +403,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63742
63743 prev = NULL;
63744 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63745 - struct file *file;
63746 -
63747 if (mpnt->vm_flags & VM_DONTCOPY) {
63748 long pages = vma_pages(mpnt);
63749 mm->total_vm -= pages;
63750 @@ -348,53 +410,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63751 -pages);
63752 continue;
63753 }
63754 - charge = 0;
63755 - if (mpnt->vm_flags & VM_ACCOUNT) {
63756 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63757 - if (security_vm_enough_memory(len))
63758 - goto fail_nomem;
63759 - charge = len;
63760 + tmp = dup_vma(mm, mpnt);
63761 + if (!tmp) {
63762 + retval = -ENOMEM;
63763 + goto out;
63764 }
63765 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63766 - if (!tmp)
63767 - goto fail_nomem;
63768 - *tmp = *mpnt;
63769 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
63770 - pol = mpol_dup(vma_policy(mpnt));
63771 - retval = PTR_ERR(pol);
63772 - if (IS_ERR(pol))
63773 - goto fail_nomem_policy;
63774 - vma_set_policy(tmp, pol);
63775 - tmp->vm_mm = mm;
63776 - if (anon_vma_fork(tmp, mpnt))
63777 - goto fail_nomem_anon_vma_fork;
63778 - tmp->vm_flags &= ~VM_LOCKED;
63779 - tmp->vm_next = tmp->vm_prev = NULL;
63780 - file = tmp->vm_file;
63781 - if (file) {
63782 - struct inode *inode = file->f_path.dentry->d_inode;
63783 - struct address_space *mapping = file->f_mapping;
63784 -
63785 - get_file(file);
63786 - if (tmp->vm_flags & VM_DENYWRITE)
63787 - atomic_dec(&inode->i_writecount);
63788 - mutex_lock(&mapping->i_mmap_mutex);
63789 - if (tmp->vm_flags & VM_SHARED)
63790 - mapping->i_mmap_writable++;
63791 - flush_dcache_mmap_lock(mapping);
63792 - /* insert tmp into the share list, just after mpnt */
63793 - vma_prio_tree_add(tmp, mpnt);
63794 - flush_dcache_mmap_unlock(mapping);
63795 - mutex_unlock(&mapping->i_mmap_mutex);
63796 - }
63797 -
63798 - /*
63799 - * Clear hugetlb-related page reserves for children. This only
63800 - * affects MAP_PRIVATE mappings. Faults generated by the child
63801 - * are not guaranteed to succeed, even if read-only
63802 - */
63803 - if (is_vm_hugetlb_page(tmp))
63804 - reset_vma_resv_huge_pages(tmp);
63805
63806 /*
63807 * Link in the new vma and copy the page table entries.
63808 @@ -417,6 +437,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63809 if (retval)
63810 goto out;
63811 }
63812 +
63813 +#ifdef CONFIG_PAX_SEGMEXEC
63814 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63815 + struct vm_area_struct *mpnt_m;
63816 +
63817 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63818 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63819 +
63820 + if (!mpnt->vm_mirror)
63821 + continue;
63822 +
63823 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63824 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63825 + mpnt->vm_mirror = mpnt_m;
63826 + } else {
63827 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63828 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63829 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63830 + mpnt->vm_mirror->vm_mirror = mpnt;
63831 + }
63832 + }
63833 + BUG_ON(mpnt_m);
63834 + }
63835 +#endif
63836 +
63837 /* a new mm has just been created */
63838 arch_dup_mmap(oldmm, mm);
63839 retval = 0;
63840 @@ -425,14 +470,6 @@ out:
63841 flush_tlb_mm(oldmm);
63842 up_write(&oldmm->mmap_sem);
63843 return retval;
63844 -fail_nomem_anon_vma_fork:
63845 - mpol_put(pol);
63846 -fail_nomem_policy:
63847 - kmem_cache_free(vm_area_cachep, tmp);
63848 -fail_nomem:
63849 - retval = -ENOMEM;
63850 - vm_unacct_memory(charge);
63851 - goto out;
63852 }
63853
63854 static inline int mm_alloc_pgd(struct mm_struct *mm)
63855 @@ -644,6 +681,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
63856 }
63857 EXPORT_SYMBOL_GPL(get_task_mm);
63858
63859 +struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
63860 +{
63861 + struct mm_struct *mm;
63862 + int err;
63863 +
63864 + err = mutex_lock_killable(&task->signal->cred_guard_mutex);
63865 + if (err)
63866 + return ERR_PTR(err);
63867 +
63868 + mm = get_task_mm(task);
63869 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
63870 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
63871 + mmput(mm);
63872 + mm = ERR_PTR(-EACCES);
63873 + }
63874 + mutex_unlock(&task->signal->cred_guard_mutex);
63875 +
63876 + return mm;
63877 +}
63878 +
63879 /* Please note the differences between mmput and mm_release.
63880 * mmput is called whenever we stop holding onto a mm_struct,
63881 * error success whatever.
63882 @@ -829,13 +886,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
63883 spin_unlock(&fs->lock);
63884 return -EAGAIN;
63885 }
63886 - fs->users++;
63887 + atomic_inc(&fs->users);
63888 spin_unlock(&fs->lock);
63889 return 0;
63890 }
63891 tsk->fs = copy_fs_struct(fs);
63892 if (!tsk->fs)
63893 return -ENOMEM;
63894 + gr_set_chroot_entries(tsk, &tsk->fs->root);
63895 return 0;
63896 }
63897
63898 @@ -1097,6 +1155,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63899 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63900 #endif
63901 retval = -EAGAIN;
63902 +
63903 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63904 +
63905 if (atomic_read(&p->real_cred->user->processes) >=
63906 task_rlimit(p, RLIMIT_NPROC)) {
63907 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63908 @@ -1256,6 +1317,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63909 if (clone_flags & CLONE_THREAD)
63910 p->tgid = current->tgid;
63911
63912 + gr_copy_label(p);
63913 +
63914 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63915 /*
63916 * Clear TID on mm_release()?
63917 @@ -1418,6 +1481,8 @@ bad_fork_cleanup_count:
63918 bad_fork_free:
63919 free_task(p);
63920 fork_out:
63921 + gr_log_forkfail(retval);
63922 +
63923 return ERR_PTR(retval);
63924 }
63925
63926 @@ -1518,6 +1583,8 @@ long do_fork(unsigned long clone_flags,
63927 if (clone_flags & CLONE_PARENT_SETTID)
63928 put_user(nr, parent_tidptr);
63929
63930 + gr_handle_brute_check();
63931 +
63932 if (clone_flags & CLONE_VFORK) {
63933 p->vfork_done = &vfork;
63934 init_completion(&vfork);
63935 @@ -1627,7 +1694,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
63936 return 0;
63937
63938 /* don't need lock here; in the worst case we'll do useless copy */
63939 - if (fs->users == 1)
63940 + if (atomic_read(&fs->users) == 1)
63941 return 0;
63942
63943 *new_fsp = copy_fs_struct(fs);
63944 @@ -1716,7 +1783,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
63945 fs = current->fs;
63946 spin_lock(&fs->lock);
63947 current->fs = new_fs;
63948 - if (--fs->users)
63949 + gr_set_chroot_entries(current, &current->fs->root);
63950 + if (atomic_dec_return(&fs->users))
63951 new_fs = NULL;
63952 else
63953 new_fs = fs;
63954 diff --git a/kernel/futex.c b/kernel/futex.c
63955 index 1614be2..37abc7e 100644
63956 --- a/kernel/futex.c
63957 +++ b/kernel/futex.c
63958 @@ -54,6 +54,7 @@
63959 #include <linux/mount.h>
63960 #include <linux/pagemap.h>
63961 #include <linux/syscalls.h>
63962 +#include <linux/ptrace.h>
63963 #include <linux/signal.h>
63964 #include <linux/export.h>
63965 #include <linux/magic.h>
63966 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
63967 struct page *page, *page_head;
63968 int err, ro = 0;
63969
63970 +#ifdef CONFIG_PAX_SEGMEXEC
63971 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63972 + return -EFAULT;
63973 +#endif
63974 +
63975 /*
63976 * The futex address must be "naturally" aligned.
63977 */
63978 @@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
63979 if (!p)
63980 goto err_unlock;
63981 ret = -EPERM;
63982 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63983 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63984 + goto err_unlock;
63985 +#endif
63986 pcred = __task_cred(p);
63987 /* If victim is in different user_ns, then uids are not
63988 comparable, so we must have CAP_SYS_PTRACE */
63989 @@ -2724,6 +2734,7 @@ static int __init futex_init(void)
63990 {
63991 u32 curval;
63992 int i;
63993 + mm_segment_t oldfs;
63994
63995 /*
63996 * This will fail and we want it. Some arch implementations do
63997 @@ -2735,8 +2746,11 @@ static int __init futex_init(void)
63998 * implementation, the non-functional ones will return
63999 * -ENOSYS.
64000 */
64001 + oldfs = get_fs();
64002 + set_fs(USER_DS);
64003 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
64004 futex_cmpxchg_enabled = 1;
64005 + set_fs(oldfs);
64006
64007 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
64008 plist_head_init(&futex_queues[i].chain);
64009 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
64010 index 5f9e689..582d46d 100644
64011 --- a/kernel/futex_compat.c
64012 +++ b/kernel/futex_compat.c
64013 @@ -10,6 +10,7 @@
64014 #include <linux/compat.h>
64015 #include <linux/nsproxy.h>
64016 #include <linux/futex.h>
64017 +#include <linux/ptrace.h>
64018
64019 #include <asm/uaccess.h>
64020
64021 @@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
64022 {
64023 struct compat_robust_list_head __user *head;
64024 unsigned long ret;
64025 - const struct cred *cred = current_cred(), *pcred;
64026 + const struct cred *cred = current_cred();
64027 + const struct cred *pcred;
64028
64029 if (!futex_cmpxchg_enabled)
64030 return -ENOSYS;
64031 @@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
64032 if (!p)
64033 goto err_unlock;
64034 ret = -EPERM;
64035 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64036 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
64037 + goto err_unlock;
64038 +#endif
64039 pcred = __task_cred(p);
64040 /* If victim is in different user_ns, then uids are not
64041 comparable, so we must have CAP_SYS_PTRACE */
64042 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
64043 index 9b22d03..6295b62 100644
64044 --- a/kernel/gcov/base.c
64045 +++ b/kernel/gcov/base.c
64046 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
64047 }
64048
64049 #ifdef CONFIG_MODULES
64050 -static inline int within(void *addr, void *start, unsigned long size)
64051 -{
64052 - return ((addr >= start) && (addr < start + size));
64053 -}
64054 -
64055 /* Update list and generate events when modules are unloaded. */
64056 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
64057 void *data)
64058 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
64059 prev = NULL;
64060 /* Remove entries located in module from linked list. */
64061 for (info = gcov_info_head; info; info = info->next) {
64062 - if (within(info, mod->module_core, mod->core_size)) {
64063 + if (within_module_core_rw((unsigned long)info, mod)) {
64064 if (prev)
64065 prev->next = info->next;
64066 else
64067 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
64068 index ae34bf5..4e2f3d0 100644
64069 --- a/kernel/hrtimer.c
64070 +++ b/kernel/hrtimer.c
64071 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
64072 local_irq_restore(flags);
64073 }
64074
64075 -static void run_hrtimer_softirq(struct softirq_action *h)
64076 +static void run_hrtimer_softirq(void)
64077 {
64078 hrtimer_peek_ahead_timers();
64079 }
64080 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
64081 index 66ff710..05a5128 100644
64082 --- a/kernel/jump_label.c
64083 +++ b/kernel/jump_label.c
64084 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
64085
64086 size = (((unsigned long)stop - (unsigned long)start)
64087 / sizeof(struct jump_entry));
64088 + pax_open_kernel();
64089 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
64090 + pax_close_kernel();
64091 }
64092
64093 static void jump_label_update(struct jump_label_key *key, int enable);
64094 @@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
64095 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
64096 struct jump_entry *iter;
64097
64098 + pax_open_kernel();
64099 for (iter = iter_start; iter < iter_stop; iter++) {
64100 if (within_module_init(iter->code, mod))
64101 iter->code = 0;
64102 }
64103 + pax_close_kernel();
64104 }
64105
64106 static int
64107 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
64108 index 079f1d3..a407562 100644
64109 --- a/kernel/kallsyms.c
64110 +++ b/kernel/kallsyms.c
64111 @@ -11,6 +11,9 @@
64112 * Changed the compression method from stem compression to "table lookup"
64113 * compression (see scripts/kallsyms.c for a more complete description)
64114 */
64115 +#ifdef CONFIG_GRKERNSEC_HIDESYM
64116 +#define __INCLUDED_BY_HIDESYM 1
64117 +#endif
64118 #include <linux/kallsyms.h>
64119 #include <linux/module.h>
64120 #include <linux/init.h>
64121 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
64122
64123 static inline int is_kernel_inittext(unsigned long addr)
64124 {
64125 + if (system_state != SYSTEM_BOOTING)
64126 + return 0;
64127 +
64128 if (addr >= (unsigned long)_sinittext
64129 && addr <= (unsigned long)_einittext)
64130 return 1;
64131 return 0;
64132 }
64133
64134 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64135 +#ifdef CONFIG_MODULES
64136 +static inline int is_module_text(unsigned long addr)
64137 +{
64138 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
64139 + return 1;
64140 +
64141 + addr = ktla_ktva(addr);
64142 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
64143 +}
64144 +#else
64145 +static inline int is_module_text(unsigned long addr)
64146 +{
64147 + return 0;
64148 +}
64149 +#endif
64150 +#endif
64151 +
64152 static inline int is_kernel_text(unsigned long addr)
64153 {
64154 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
64155 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
64156
64157 static inline int is_kernel(unsigned long addr)
64158 {
64159 +
64160 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64161 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
64162 + return 1;
64163 +
64164 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
64165 +#else
64166 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
64167 +#endif
64168 +
64169 return 1;
64170 return in_gate_area_no_mm(addr);
64171 }
64172
64173 static int is_ksym_addr(unsigned long addr)
64174 {
64175 +
64176 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64177 + if (is_module_text(addr))
64178 + return 0;
64179 +#endif
64180 +
64181 if (all_var)
64182 return is_kernel(addr);
64183
64184 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
64185
64186 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
64187 {
64188 - iter->name[0] = '\0';
64189 iter->nameoff = get_symbol_offset(new_pos);
64190 iter->pos = new_pos;
64191 }
64192 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
64193 {
64194 struct kallsym_iter *iter = m->private;
64195
64196 +#ifdef CONFIG_GRKERNSEC_HIDESYM
64197 + if (current_uid())
64198 + return 0;
64199 +#endif
64200 +
64201 /* Some debugging symbols have no name. Ignore them. */
64202 if (!iter->name[0])
64203 return 0;
64204 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
64205 struct kallsym_iter *iter;
64206 int ret;
64207
64208 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
64209 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
64210 if (!iter)
64211 return -ENOMEM;
64212 reset_iter(iter, 0);
64213 diff --git a/kernel/kexec.c b/kernel/kexec.c
64214 index dc7bc08..4601964 100644
64215 --- a/kernel/kexec.c
64216 +++ b/kernel/kexec.c
64217 @@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
64218 unsigned long flags)
64219 {
64220 struct compat_kexec_segment in;
64221 - struct kexec_segment out, __user *ksegments;
64222 + struct kexec_segment out;
64223 + struct kexec_segment __user *ksegments;
64224 unsigned long i, result;
64225
64226 /* Don't allow clients that don't understand the native
64227 diff --git a/kernel/kmod.c b/kernel/kmod.c
64228 index a4bea97..7a1ae9a 100644
64229 --- a/kernel/kmod.c
64230 +++ b/kernel/kmod.c
64231 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
64232 * If module auto-loading support is disabled then this function
64233 * becomes a no-operation.
64234 */
64235 -int __request_module(bool wait, const char *fmt, ...)
64236 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
64237 {
64238 - va_list args;
64239 char module_name[MODULE_NAME_LEN];
64240 unsigned int max_modprobes;
64241 int ret;
64242 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
64243 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
64244 static char *envp[] = { "HOME=/",
64245 "TERM=linux",
64246 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
64247 @@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
64248 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
64249 static int kmod_loop_msg;
64250
64251 - va_start(args, fmt);
64252 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
64253 - va_end(args);
64254 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
64255 if (ret >= MODULE_NAME_LEN)
64256 return -ENAMETOOLONG;
64257
64258 @@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
64259 if (ret)
64260 return ret;
64261
64262 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64263 + if (!current_uid()) {
64264 + /* hack to workaround consolekit/udisks stupidity */
64265 + read_lock(&tasklist_lock);
64266 + if (!strcmp(current->comm, "mount") &&
64267 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
64268 + read_unlock(&tasklist_lock);
64269 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
64270 + return -EPERM;
64271 + }
64272 + read_unlock(&tasklist_lock);
64273 + }
64274 +#endif
64275 +
64276 /* If modprobe needs a service that is in a module, we get a recursive
64277 * loop. Limit the number of running kmod threads to max_threads/2 or
64278 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
64279 @@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
64280 atomic_dec(&kmod_concurrent);
64281 return ret;
64282 }
64283 +
64284 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
64285 +{
64286 + va_list args;
64287 + int ret;
64288 +
64289 + va_start(args, fmt);
64290 + ret = ____request_module(wait, module_param, fmt, args);
64291 + va_end(args);
64292 +
64293 + return ret;
64294 +}
64295 +
64296 +int __request_module(bool wait, const char *fmt, ...)
64297 +{
64298 + va_list args;
64299 + int ret;
64300 +
64301 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64302 + if (current_uid()) {
64303 + char module_param[MODULE_NAME_LEN];
64304 +
64305 + memset(module_param, 0, sizeof(module_param));
64306 +
64307 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
64308 +
64309 + va_start(args, fmt);
64310 + ret = ____request_module(wait, module_param, fmt, args);
64311 + va_end(args);
64312 +
64313 + return ret;
64314 + }
64315 +#endif
64316 +
64317 + va_start(args, fmt);
64318 + ret = ____request_module(wait, NULL, fmt, args);
64319 + va_end(args);
64320 +
64321 + return ret;
64322 +}
64323 +
64324 EXPORT_SYMBOL(__request_module);
64325 #endif /* CONFIG_MODULES */
64326
64327 @@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
64328 *
64329 * Thus the __user pointer cast is valid here.
64330 */
64331 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
64332 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
64333
64334 /*
64335 * If ret is 0, either ____call_usermodehelper failed and the
64336 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
64337 index faa39d1..d7ad37e 100644
64338 --- a/kernel/kprobes.c
64339 +++ b/kernel/kprobes.c
64340 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
64341 * kernel image and loaded module images reside. This is required
64342 * so x86_64 can correctly handle the %rip-relative fixups.
64343 */
64344 - kip->insns = module_alloc(PAGE_SIZE);
64345 + kip->insns = module_alloc_exec(PAGE_SIZE);
64346 if (!kip->insns) {
64347 kfree(kip);
64348 return NULL;
64349 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
64350 */
64351 if (!list_is_singular(&kip->list)) {
64352 list_del(&kip->list);
64353 - module_free(NULL, kip->insns);
64354 + module_free_exec(NULL, kip->insns);
64355 kfree(kip);
64356 }
64357 return 1;
64358 @@ -1953,7 +1953,7 @@ static int __init init_kprobes(void)
64359 {
64360 int i, err = 0;
64361 unsigned long offset = 0, size = 0;
64362 - char *modname, namebuf[128];
64363 + char *modname, namebuf[KSYM_NAME_LEN];
64364 const char *symbol_name;
64365 void *addr;
64366 struct kprobe_blackpoint *kb;
64367 @@ -2079,7 +2079,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
64368 const char *sym = NULL;
64369 unsigned int i = *(loff_t *) v;
64370 unsigned long offset = 0;
64371 - char *modname, namebuf[128];
64372 + char *modname, namebuf[KSYM_NAME_LEN];
64373
64374 head = &kprobe_table[i];
64375 preempt_disable();
64376 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
64377 index b2e08c9..01d8049 100644
64378 --- a/kernel/lockdep.c
64379 +++ b/kernel/lockdep.c
64380 @@ -592,6 +592,10 @@ static int static_obj(void *obj)
64381 end = (unsigned long) &_end,
64382 addr = (unsigned long) obj;
64383
64384 +#ifdef CONFIG_PAX_KERNEXEC
64385 + start = ktla_ktva(start);
64386 +#endif
64387 +
64388 /*
64389 * static variable?
64390 */
64391 @@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
64392 if (!static_obj(lock->key)) {
64393 debug_locks_off();
64394 printk("INFO: trying to register non-static key.\n");
64395 + printk("lock:%pS key:%pS.\n", lock, lock->key);
64396 printk("the code is fine but needs lockdep annotation.\n");
64397 printk("turning off the locking correctness validator.\n");
64398 dump_stack();
64399 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
64400 if (!class)
64401 return 0;
64402 }
64403 - atomic_inc((atomic_t *)&class->ops);
64404 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
64405 if (very_verbose(class)) {
64406 printk("\nacquire class [%p] %s", class->key, class->name);
64407 if (class->name_version > 1)
64408 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
64409 index 91c32a0..b2c71c5 100644
64410 --- a/kernel/lockdep_proc.c
64411 +++ b/kernel/lockdep_proc.c
64412 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
64413
64414 static void print_name(struct seq_file *m, struct lock_class *class)
64415 {
64416 - char str[128];
64417 + char str[KSYM_NAME_LEN];
64418 const char *name = class->name;
64419
64420 if (!name) {
64421 diff --git a/kernel/module.c b/kernel/module.c
64422 index 178333c..04e3408 100644
64423 --- a/kernel/module.c
64424 +++ b/kernel/module.c
64425 @@ -58,6 +58,7 @@
64426 #include <linux/jump_label.h>
64427 #include <linux/pfn.h>
64428 #include <linux/bsearch.h>
64429 +#include <linux/grsecurity.h>
64430
64431 #define CREATE_TRACE_POINTS
64432 #include <trace/events/module.h>
64433 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
64434
64435 /* Bounds of module allocation, for speeding __module_address.
64436 * Protected by module_mutex. */
64437 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
64438 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
64439 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
64440
64441 int register_module_notifier(struct notifier_block * nb)
64442 {
64443 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64444 return true;
64445
64446 list_for_each_entry_rcu(mod, &modules, list) {
64447 - struct symsearch arr[] = {
64448 + struct symsearch modarr[] = {
64449 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
64450 NOT_GPL_ONLY, false },
64451 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
64452 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64453 #endif
64454 };
64455
64456 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
64457 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
64458 return true;
64459 }
64460 return false;
64461 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
64462 static int percpu_modalloc(struct module *mod,
64463 unsigned long size, unsigned long align)
64464 {
64465 - if (align > PAGE_SIZE) {
64466 + if (align-1 >= PAGE_SIZE) {
64467 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
64468 mod->name, align, PAGE_SIZE);
64469 align = PAGE_SIZE;
64470 @@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
64471 */
64472 #ifdef CONFIG_SYSFS
64473
64474 -#ifdef CONFIG_KALLSYMS
64475 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64476 static inline bool sect_empty(const Elf_Shdr *sect)
64477 {
64478 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
64479 @@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
64480
64481 static void unset_module_core_ro_nx(struct module *mod)
64482 {
64483 - set_page_attributes(mod->module_core + mod->core_text_size,
64484 - mod->module_core + mod->core_size,
64485 + set_page_attributes(mod->module_core_rw,
64486 + mod->module_core_rw + mod->core_size_rw,
64487 set_memory_x);
64488 - set_page_attributes(mod->module_core,
64489 - mod->module_core + mod->core_ro_size,
64490 + set_page_attributes(mod->module_core_rx,
64491 + mod->module_core_rx + mod->core_size_rx,
64492 set_memory_rw);
64493 }
64494
64495 static void unset_module_init_ro_nx(struct module *mod)
64496 {
64497 - set_page_attributes(mod->module_init + mod->init_text_size,
64498 - mod->module_init + mod->init_size,
64499 + set_page_attributes(mod->module_init_rw,
64500 + mod->module_init_rw + mod->init_size_rw,
64501 set_memory_x);
64502 - set_page_attributes(mod->module_init,
64503 - mod->module_init + mod->init_ro_size,
64504 + set_page_attributes(mod->module_init_rx,
64505 + mod->module_init_rx + mod->init_size_rx,
64506 set_memory_rw);
64507 }
64508
64509 @@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
64510
64511 mutex_lock(&module_mutex);
64512 list_for_each_entry_rcu(mod, &modules, list) {
64513 - if ((mod->module_core) && (mod->core_text_size)) {
64514 - set_page_attributes(mod->module_core,
64515 - mod->module_core + mod->core_text_size,
64516 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64517 + set_page_attributes(mod->module_core_rx,
64518 + mod->module_core_rx + mod->core_size_rx,
64519 set_memory_rw);
64520 }
64521 - if ((mod->module_init) && (mod->init_text_size)) {
64522 - set_page_attributes(mod->module_init,
64523 - mod->module_init + mod->init_text_size,
64524 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64525 + set_page_attributes(mod->module_init_rx,
64526 + mod->module_init_rx + mod->init_size_rx,
64527 set_memory_rw);
64528 }
64529 }
64530 @@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
64531
64532 mutex_lock(&module_mutex);
64533 list_for_each_entry_rcu(mod, &modules, list) {
64534 - if ((mod->module_core) && (mod->core_text_size)) {
64535 - set_page_attributes(mod->module_core,
64536 - mod->module_core + mod->core_text_size,
64537 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64538 + set_page_attributes(mod->module_core_rx,
64539 + mod->module_core_rx + mod->core_size_rx,
64540 set_memory_ro);
64541 }
64542 - if ((mod->module_init) && (mod->init_text_size)) {
64543 - set_page_attributes(mod->module_init,
64544 - mod->module_init + mod->init_text_size,
64545 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64546 + set_page_attributes(mod->module_init_rx,
64547 + mod->module_init_rx + mod->init_size_rx,
64548 set_memory_ro);
64549 }
64550 }
64551 @@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
64552
64553 /* This may be NULL, but that's OK */
64554 unset_module_init_ro_nx(mod);
64555 - module_free(mod, mod->module_init);
64556 + module_free(mod, mod->module_init_rw);
64557 + module_free_exec(mod, mod->module_init_rx);
64558 kfree(mod->args);
64559 percpu_modfree(mod);
64560
64561 /* Free lock-classes: */
64562 - lockdep_free_key_range(mod->module_core, mod->core_size);
64563 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64564 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64565
64566 /* Finally, free the core (containing the module structure) */
64567 unset_module_core_ro_nx(mod);
64568 - module_free(mod, mod->module_core);
64569 + module_free_exec(mod, mod->module_core_rx);
64570 + module_free(mod, mod->module_core_rw);
64571
64572 #ifdef CONFIG_MPU
64573 update_protections(current->mm);
64574 @@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64575 unsigned int i;
64576 int ret = 0;
64577 const struct kernel_symbol *ksym;
64578 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64579 + int is_fs_load = 0;
64580 + int register_filesystem_found = 0;
64581 + char *p;
64582 +
64583 + p = strstr(mod->args, "grsec_modharden_fs");
64584 + if (p) {
64585 + char *endptr = p + strlen("grsec_modharden_fs");
64586 + /* copy \0 as well */
64587 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64588 + is_fs_load = 1;
64589 + }
64590 +#endif
64591
64592 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
64593 const char *name = info->strtab + sym[i].st_name;
64594
64595 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64596 + /* it's a real shame this will never get ripped and copied
64597 + upstream! ;(
64598 + */
64599 + if (is_fs_load && !strcmp(name, "register_filesystem"))
64600 + register_filesystem_found = 1;
64601 +#endif
64602 +
64603 switch (sym[i].st_shndx) {
64604 case SHN_COMMON:
64605 /* We compiled with -fno-common. These are not
64606 @@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64607 ksym = resolve_symbol_wait(mod, info, name);
64608 /* Ok if resolved. */
64609 if (ksym && !IS_ERR(ksym)) {
64610 + pax_open_kernel();
64611 sym[i].st_value = ksym->value;
64612 + pax_close_kernel();
64613 break;
64614 }
64615
64616 @@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64617 secbase = (unsigned long)mod_percpu(mod);
64618 else
64619 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
64620 + pax_open_kernel();
64621 sym[i].st_value += secbase;
64622 + pax_close_kernel();
64623 break;
64624 }
64625 }
64626
64627 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64628 + if (is_fs_load && !register_filesystem_found) {
64629 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64630 + ret = -EPERM;
64631 + }
64632 +#endif
64633 +
64634 return ret;
64635 }
64636
64637 @@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
64638 || s->sh_entsize != ~0UL
64639 || strstarts(sname, ".init"))
64640 continue;
64641 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64642 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64643 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64644 + else
64645 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64646 DEBUGP("\t%s\n", name);
64647 }
64648 - switch (m) {
64649 - case 0: /* executable */
64650 - mod->core_size = debug_align(mod->core_size);
64651 - mod->core_text_size = mod->core_size;
64652 - break;
64653 - case 1: /* RO: text and ro-data */
64654 - mod->core_size = debug_align(mod->core_size);
64655 - mod->core_ro_size = mod->core_size;
64656 - break;
64657 - case 3: /* whole core */
64658 - mod->core_size = debug_align(mod->core_size);
64659 - break;
64660 - }
64661 }
64662
64663 DEBUGP("Init section allocation order:\n");
64664 @@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
64665 || s->sh_entsize != ~0UL
64666 || !strstarts(sname, ".init"))
64667 continue;
64668 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64669 - | INIT_OFFSET_MASK);
64670 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64671 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64672 + else
64673 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64674 + s->sh_entsize |= INIT_OFFSET_MASK;
64675 DEBUGP("\t%s\n", sname);
64676 }
64677 - switch (m) {
64678 - case 0: /* executable */
64679 - mod->init_size = debug_align(mod->init_size);
64680 - mod->init_text_size = mod->init_size;
64681 - break;
64682 - case 1: /* RO: text and ro-data */
64683 - mod->init_size = debug_align(mod->init_size);
64684 - mod->init_ro_size = mod->init_size;
64685 - break;
64686 - case 3: /* whole init */
64687 - mod->init_size = debug_align(mod->init_size);
64688 - break;
64689 - }
64690 }
64691 }
64692
64693 @@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64694
64695 /* Put symbol section at end of init part of module. */
64696 symsect->sh_flags |= SHF_ALLOC;
64697 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64698 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64699 info->index.sym) | INIT_OFFSET_MASK;
64700 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64701
64702 @@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64703 }
64704
64705 /* Append room for core symbols at end of core part. */
64706 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64707 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64708 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64709 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64710
64711 /* Put string table section at end of init part of module. */
64712 strsect->sh_flags |= SHF_ALLOC;
64713 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64714 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64715 info->index.str) | INIT_OFFSET_MASK;
64716 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64717
64718 /* Append room for core symbols' strings at end of core part. */
64719 - info->stroffs = mod->core_size;
64720 + info->stroffs = mod->core_size_rx;
64721 __set_bit(0, info->strmap);
64722 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64723 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64724 }
64725
64726 static void add_kallsyms(struct module *mod, const struct load_info *info)
64727 @@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64728 /* Make sure we get permanent strtab: don't use info->strtab. */
64729 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64730
64731 + pax_open_kernel();
64732 +
64733 /* Set types up while we still have access to sections. */
64734 for (i = 0; i < mod->num_symtab; i++)
64735 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64736
64737 - mod->core_symtab = dst = mod->module_core + info->symoffs;
64738 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64739 src = mod->symtab;
64740 *dst = *src;
64741 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64742 @@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64743 }
64744 mod->core_num_syms = ndst;
64745
64746 - mod->core_strtab = s = mod->module_core + info->stroffs;
64747 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64748 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64749 if (test_bit(i, info->strmap))
64750 *++s = mod->strtab[i];
64751 +
64752 + pax_close_kernel();
64753 }
64754 #else
64755 static inline void layout_symtab(struct module *mod, struct load_info *info)
64756 @@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
64757 return size == 0 ? NULL : vmalloc_exec(size);
64758 }
64759
64760 -static void *module_alloc_update_bounds(unsigned long size)
64761 +static void *module_alloc_update_bounds_rw(unsigned long size)
64762 {
64763 void *ret = module_alloc(size);
64764
64765 if (ret) {
64766 mutex_lock(&module_mutex);
64767 /* Update module bounds. */
64768 - if ((unsigned long)ret < module_addr_min)
64769 - module_addr_min = (unsigned long)ret;
64770 - if ((unsigned long)ret + size > module_addr_max)
64771 - module_addr_max = (unsigned long)ret + size;
64772 + if ((unsigned long)ret < module_addr_min_rw)
64773 + module_addr_min_rw = (unsigned long)ret;
64774 + if ((unsigned long)ret + size > module_addr_max_rw)
64775 + module_addr_max_rw = (unsigned long)ret + size;
64776 + mutex_unlock(&module_mutex);
64777 + }
64778 + return ret;
64779 +}
64780 +
64781 +static void *module_alloc_update_bounds_rx(unsigned long size)
64782 +{
64783 + void *ret = module_alloc_exec(size);
64784 +
64785 + if (ret) {
64786 + mutex_lock(&module_mutex);
64787 + /* Update module bounds. */
64788 + if ((unsigned long)ret < module_addr_min_rx)
64789 + module_addr_min_rx = (unsigned long)ret;
64790 + if ((unsigned long)ret + size > module_addr_max_rx)
64791 + module_addr_max_rx = (unsigned long)ret + size;
64792 mutex_unlock(&module_mutex);
64793 }
64794 return ret;
64795 @@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
64796 static int check_modinfo(struct module *mod, struct load_info *info)
64797 {
64798 const char *modmagic = get_modinfo(info, "vermagic");
64799 + const char *license = get_modinfo(info, "license");
64800 int err;
64801
64802 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
64803 + if (!license || !license_is_gpl_compatible(license))
64804 + return -ENOEXEC;
64805 +#endif
64806 +
64807 /* This is allowed: modprobe --force will invalidate it. */
64808 if (!modmagic) {
64809 err = try_to_force_load(mod, "bad vermagic");
64810 @@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
64811 }
64812
64813 /* Set up license info based on the info section */
64814 - set_license(mod, get_modinfo(info, "license"));
64815 + set_license(mod, license);
64816
64817 return 0;
64818 }
64819 @@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info)
64820 void *ptr;
64821
64822 /* Do the allocs. */
64823 - ptr = module_alloc_update_bounds(mod->core_size);
64824 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64825 /*
64826 * The pointer to this block is stored in the module structure
64827 * which is inside the block. Just mark it as not being a
64828 @@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info)
64829 if (!ptr)
64830 return -ENOMEM;
64831
64832 - memset(ptr, 0, mod->core_size);
64833 - mod->module_core = ptr;
64834 + memset(ptr, 0, mod->core_size_rw);
64835 + mod->module_core_rw = ptr;
64836
64837 - ptr = module_alloc_update_bounds(mod->init_size);
64838 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64839 /*
64840 * The pointer to this block is stored in the module structure
64841 * which is inside the block. This block doesn't need to be
64842 * scanned as it contains data and code that will be freed
64843 * after the module is initialized.
64844 */
64845 - kmemleak_ignore(ptr);
64846 - if (!ptr && mod->init_size) {
64847 - module_free(mod, mod->module_core);
64848 + kmemleak_not_leak(ptr);
64849 + if (!ptr && mod->init_size_rw) {
64850 + module_free(mod, mod->module_core_rw);
64851 return -ENOMEM;
64852 }
64853 - memset(ptr, 0, mod->init_size);
64854 - mod->module_init = ptr;
64855 + memset(ptr, 0, mod->init_size_rw);
64856 + mod->module_init_rw = ptr;
64857 +
64858 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64859 + kmemleak_not_leak(ptr);
64860 + if (!ptr) {
64861 + module_free(mod, mod->module_init_rw);
64862 + module_free(mod, mod->module_core_rw);
64863 + return -ENOMEM;
64864 + }
64865 +
64866 + pax_open_kernel();
64867 + memset(ptr, 0, mod->core_size_rx);
64868 + pax_close_kernel();
64869 + mod->module_core_rx = ptr;
64870 +
64871 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64872 + kmemleak_not_leak(ptr);
64873 + if (!ptr && mod->init_size_rx) {
64874 + module_free_exec(mod, mod->module_core_rx);
64875 + module_free(mod, mod->module_init_rw);
64876 + module_free(mod, mod->module_core_rw);
64877 + return -ENOMEM;
64878 + }
64879 +
64880 + pax_open_kernel();
64881 + memset(ptr, 0, mod->init_size_rx);
64882 + pax_close_kernel();
64883 + mod->module_init_rx = ptr;
64884
64885 /* Transfer each section which specifies SHF_ALLOC */
64886 DEBUGP("final section addresses:\n");
64887 @@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info)
64888 if (!(shdr->sh_flags & SHF_ALLOC))
64889 continue;
64890
64891 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
64892 - dest = mod->module_init
64893 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64894 - else
64895 - dest = mod->module_core + shdr->sh_entsize;
64896 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64897 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64898 + dest = mod->module_init_rw
64899 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64900 + else
64901 + dest = mod->module_init_rx
64902 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64903 + } else {
64904 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64905 + dest = mod->module_core_rw + shdr->sh_entsize;
64906 + else
64907 + dest = mod->module_core_rx + shdr->sh_entsize;
64908 + }
64909 +
64910 + if (shdr->sh_type != SHT_NOBITS) {
64911 +
64912 +#ifdef CONFIG_PAX_KERNEXEC
64913 +#ifdef CONFIG_X86_64
64914 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64915 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64916 +#endif
64917 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64918 + pax_open_kernel();
64919 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64920 + pax_close_kernel();
64921 + } else
64922 +#endif
64923
64924 - if (shdr->sh_type != SHT_NOBITS)
64925 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64926 + }
64927 /* Update sh_addr to point to copy in image. */
64928 - shdr->sh_addr = (unsigned long)dest;
64929 +
64930 +#ifdef CONFIG_PAX_KERNEXEC
64931 + if (shdr->sh_flags & SHF_EXECINSTR)
64932 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
64933 + else
64934 +#endif
64935 +
64936 + shdr->sh_addr = (unsigned long)dest;
64937 DEBUGP("\t0x%lx %s\n",
64938 shdr->sh_addr, info->secstrings + shdr->sh_name);
64939 }
64940 @@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod)
64941 * Do it before processing of module parameters, so the module
64942 * can provide parameter accessor functions of its own.
64943 */
64944 - if (mod->module_init)
64945 - flush_icache_range((unsigned long)mod->module_init,
64946 - (unsigned long)mod->module_init
64947 - + mod->init_size);
64948 - flush_icache_range((unsigned long)mod->module_core,
64949 - (unsigned long)mod->module_core + mod->core_size);
64950 + if (mod->module_init_rx)
64951 + flush_icache_range((unsigned long)mod->module_init_rx,
64952 + (unsigned long)mod->module_init_rx
64953 + + mod->init_size_rx);
64954 + flush_icache_range((unsigned long)mod->module_core_rx,
64955 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
64956
64957 set_fs(old_fs);
64958 }
64959 @@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
64960 {
64961 kfree(info->strmap);
64962 percpu_modfree(mod);
64963 - module_free(mod, mod->module_init);
64964 - module_free(mod, mod->module_core);
64965 + module_free_exec(mod, mod->module_init_rx);
64966 + module_free_exec(mod, mod->module_core_rx);
64967 + module_free(mod, mod->module_init_rw);
64968 + module_free(mod, mod->module_core_rw);
64969 }
64970
64971 int __weak module_finalize(const Elf_Ehdr *hdr,
64972 @@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod,
64973 if (err)
64974 goto free_unload;
64975
64976 + /* Now copy in args */
64977 + mod->args = strndup_user(uargs, ~0UL >> 1);
64978 + if (IS_ERR(mod->args)) {
64979 + err = PTR_ERR(mod->args);
64980 + goto free_unload;
64981 + }
64982 +
64983 /* Set up MODINFO_ATTR fields */
64984 setup_modinfo(mod, &info);
64985
64986 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64987 + {
64988 + char *p, *p2;
64989 +
64990 + if (strstr(mod->args, "grsec_modharden_netdev")) {
64991 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64992 + err = -EPERM;
64993 + goto free_modinfo;
64994 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64995 + p += strlen("grsec_modharden_normal");
64996 + p2 = strstr(p, "_");
64997 + if (p2) {
64998 + *p2 = '\0';
64999 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
65000 + *p2 = '_';
65001 + }
65002 + err = -EPERM;
65003 + goto free_modinfo;
65004 + }
65005 + }
65006 +#endif
65007 +
65008 /* Fix up syms, so that st_value is a pointer to location. */
65009 err = simplify_symbols(mod, &info);
65010 if (err < 0)
65011 @@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod,
65012
65013 flush_module_icache(mod);
65014
65015 - /* Now copy in args */
65016 - mod->args = strndup_user(uargs, ~0UL >> 1);
65017 - if (IS_ERR(mod->args)) {
65018 - err = PTR_ERR(mod->args);
65019 - goto free_arch_cleanup;
65020 - }
65021 -
65022 /* Mark state as coming so strong_try_module_get() ignores us. */
65023 mod->state = MODULE_STATE_COMING;
65024
65025 @@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod,
65026 unlock:
65027 mutex_unlock(&module_mutex);
65028 synchronize_sched();
65029 - kfree(mod->args);
65030 - free_arch_cleanup:
65031 module_arch_cleanup(mod);
65032 free_modinfo:
65033 free_modinfo(mod);
65034 + kfree(mod->args);
65035 free_unload:
65036 module_unload_free(mod);
65037 free_module:
65038 @@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
65039 MODULE_STATE_COMING, mod);
65040
65041 /* Set RO and NX regions for core */
65042 - set_section_ro_nx(mod->module_core,
65043 - mod->core_text_size,
65044 - mod->core_ro_size,
65045 - mod->core_size);
65046 + set_section_ro_nx(mod->module_core_rx,
65047 + mod->core_size_rx,
65048 + mod->core_size_rx,
65049 + mod->core_size_rx);
65050
65051 /* Set RO and NX regions for init */
65052 - set_section_ro_nx(mod->module_init,
65053 - mod->init_text_size,
65054 - mod->init_ro_size,
65055 - mod->init_size);
65056 + set_section_ro_nx(mod->module_init_rx,
65057 + mod->init_size_rx,
65058 + mod->init_size_rx,
65059 + mod->init_size_rx);
65060
65061 do_mod_ctors(mod);
65062 /* Start the module */
65063 @@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
65064 mod->strtab = mod->core_strtab;
65065 #endif
65066 unset_module_init_ro_nx(mod);
65067 - module_free(mod, mod->module_init);
65068 - mod->module_init = NULL;
65069 - mod->init_size = 0;
65070 - mod->init_ro_size = 0;
65071 - mod->init_text_size = 0;
65072 + module_free(mod, mod->module_init_rw);
65073 + module_free_exec(mod, mod->module_init_rx);
65074 + mod->module_init_rw = NULL;
65075 + mod->module_init_rx = NULL;
65076 + mod->init_size_rw = 0;
65077 + mod->init_size_rx = 0;
65078 mutex_unlock(&module_mutex);
65079
65080 return 0;
65081 @@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod,
65082 unsigned long nextval;
65083
65084 /* At worse, next value is at end of module */
65085 - if (within_module_init(addr, mod))
65086 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
65087 + if (within_module_init_rx(addr, mod))
65088 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
65089 + else if (within_module_init_rw(addr, mod))
65090 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
65091 + else if (within_module_core_rx(addr, mod))
65092 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
65093 + else if (within_module_core_rw(addr, mod))
65094 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
65095 else
65096 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
65097 + return NULL;
65098
65099 /* Scan for closest preceding symbol, and next symbol. (ELF
65100 starts real symbols at 1). */
65101 @@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p)
65102 char buf[8];
65103
65104 seq_printf(m, "%s %u",
65105 - mod->name, mod->init_size + mod->core_size);
65106 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
65107 print_unload_info(m, mod);
65108
65109 /* Informative for users. */
65110 @@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p)
65111 mod->state == MODULE_STATE_COMING ? "Loading":
65112 "Live");
65113 /* Used by oprofile and other similar tools. */
65114 - seq_printf(m, " 0x%pK", mod->module_core);
65115 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
65116
65117 /* Taints info */
65118 if (mod->taints)
65119 @@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = {
65120
65121 static int __init proc_modules_init(void)
65122 {
65123 +#ifndef CONFIG_GRKERNSEC_HIDESYM
65124 +#ifdef CONFIG_GRKERNSEC_PROC_USER
65125 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
65126 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65127 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
65128 +#else
65129 proc_create("modules", 0, NULL, &proc_modules_operations);
65130 +#endif
65131 +#else
65132 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
65133 +#endif
65134 return 0;
65135 }
65136 module_init(proc_modules_init);
65137 @@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr)
65138 {
65139 struct module *mod;
65140
65141 - if (addr < module_addr_min || addr > module_addr_max)
65142 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
65143 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
65144 return NULL;
65145
65146 list_for_each_entry_rcu(mod, &modules, list)
65147 - if (within_module_core(addr, mod)
65148 - || within_module_init(addr, mod))
65149 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
65150 return mod;
65151 return NULL;
65152 }
65153 @@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr)
65154 */
65155 struct module *__module_text_address(unsigned long addr)
65156 {
65157 - struct module *mod = __module_address(addr);
65158 + struct module *mod;
65159 +
65160 +#ifdef CONFIG_X86_32
65161 + addr = ktla_ktva(addr);
65162 +#endif
65163 +
65164 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
65165 + return NULL;
65166 +
65167 + mod = __module_address(addr);
65168 +
65169 if (mod) {
65170 /* Make sure it's within the text section. */
65171 - if (!within(addr, mod->module_init, mod->init_text_size)
65172 - && !within(addr, mod->module_core, mod->core_text_size))
65173 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
65174 mod = NULL;
65175 }
65176 return mod;
65177 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
65178 index 7e3443f..b2a1e6b 100644
65179 --- a/kernel/mutex-debug.c
65180 +++ b/kernel/mutex-debug.c
65181 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
65182 }
65183
65184 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65185 - struct thread_info *ti)
65186 + struct task_struct *task)
65187 {
65188 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
65189
65190 /* Mark the current thread as blocked on the lock: */
65191 - ti->task->blocked_on = waiter;
65192 + task->blocked_on = waiter;
65193 }
65194
65195 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65196 - struct thread_info *ti)
65197 + struct task_struct *task)
65198 {
65199 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
65200 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
65201 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
65202 - ti->task->blocked_on = NULL;
65203 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
65204 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
65205 + task->blocked_on = NULL;
65206
65207 list_del_init(&waiter->list);
65208 waiter->task = NULL;
65209 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
65210 index 0799fd3..d06ae3b 100644
65211 --- a/kernel/mutex-debug.h
65212 +++ b/kernel/mutex-debug.h
65213 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
65214 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
65215 extern void debug_mutex_add_waiter(struct mutex *lock,
65216 struct mutex_waiter *waiter,
65217 - struct thread_info *ti);
65218 + struct task_struct *task);
65219 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65220 - struct thread_info *ti);
65221 + struct task_struct *task);
65222 extern void debug_mutex_unlock(struct mutex *lock);
65223 extern void debug_mutex_init(struct mutex *lock, const char *name,
65224 struct lock_class_key *key);
65225 diff --git a/kernel/mutex.c b/kernel/mutex.c
65226 index 89096dd..f91ebc5 100644
65227 --- a/kernel/mutex.c
65228 +++ b/kernel/mutex.c
65229 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
65230 spin_lock_mutex(&lock->wait_lock, flags);
65231
65232 debug_mutex_lock_common(lock, &waiter);
65233 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
65234 + debug_mutex_add_waiter(lock, &waiter, task);
65235
65236 /* add waiting tasks to the end of the waitqueue (FIFO): */
65237 list_add_tail(&waiter.list, &lock->wait_list);
65238 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
65239 * TASK_UNINTERRUPTIBLE case.)
65240 */
65241 if (unlikely(signal_pending_state(state, task))) {
65242 - mutex_remove_waiter(lock, &waiter,
65243 - task_thread_info(task));
65244 + mutex_remove_waiter(lock, &waiter, task);
65245 mutex_release(&lock->dep_map, 1, ip);
65246 spin_unlock_mutex(&lock->wait_lock, flags);
65247
65248 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
65249 done:
65250 lock_acquired(&lock->dep_map, ip);
65251 /* got the lock - rejoice! */
65252 - mutex_remove_waiter(lock, &waiter, current_thread_info());
65253 + mutex_remove_waiter(lock, &waiter, task);
65254 mutex_set_owner(lock);
65255
65256 /* set it to 0 if there are no waiters left: */
65257 diff --git a/kernel/padata.c b/kernel/padata.c
65258 index b452599..5d68f4e 100644
65259 --- a/kernel/padata.c
65260 +++ b/kernel/padata.c
65261 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
65262 padata->pd = pd;
65263 padata->cb_cpu = cb_cpu;
65264
65265 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
65266 - atomic_set(&pd->seq_nr, -1);
65267 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
65268 + atomic_set_unchecked(&pd->seq_nr, -1);
65269
65270 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
65271 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
65272
65273 target_cpu = padata_cpu_hash(padata);
65274 queue = per_cpu_ptr(pd->pqueue, target_cpu);
65275 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
65276 padata_init_pqueues(pd);
65277 padata_init_squeues(pd);
65278 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
65279 - atomic_set(&pd->seq_nr, -1);
65280 + atomic_set_unchecked(&pd->seq_nr, -1);
65281 atomic_set(&pd->reorder_objects, 0);
65282 atomic_set(&pd->refcnt, 0);
65283 pd->pinst = pinst;
65284 diff --git a/kernel/panic.c b/kernel/panic.c
65285 index 3458469..342c500 100644
65286 --- a/kernel/panic.c
65287 +++ b/kernel/panic.c
65288 @@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
65289 va_end(args);
65290 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
65291 #ifdef CONFIG_DEBUG_BUGVERBOSE
65292 - dump_stack();
65293 + /*
65294 + * Avoid nested stack-dumping if a panic occurs during oops processing
65295 + */
65296 + if (!oops_in_progress)
65297 + dump_stack();
65298 #endif
65299
65300 /*
65301 @@ -382,7 +386,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
65302 const char *board;
65303
65304 printk(KERN_WARNING "------------[ cut here ]------------\n");
65305 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
65306 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
65307 board = dmi_get_system_info(DMI_PRODUCT_NAME);
65308 if (board)
65309 printk(KERN_WARNING "Hardware name: %s\n", board);
65310 @@ -437,7 +441,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
65311 */
65312 void __stack_chk_fail(void)
65313 {
65314 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
65315 + dump_stack();
65316 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
65317 __builtin_return_address(0));
65318 }
65319 EXPORT_SYMBOL(__stack_chk_fail);
65320 diff --git a/kernel/pid.c b/kernel/pid.c
65321 index fa5f722..0c93e57 100644
65322 --- a/kernel/pid.c
65323 +++ b/kernel/pid.c
65324 @@ -33,6 +33,7 @@
65325 #include <linux/rculist.h>
65326 #include <linux/bootmem.h>
65327 #include <linux/hash.h>
65328 +#include <linux/security.h>
65329 #include <linux/pid_namespace.h>
65330 #include <linux/init_task.h>
65331 #include <linux/syscalls.h>
65332 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
65333
65334 int pid_max = PID_MAX_DEFAULT;
65335
65336 -#define RESERVED_PIDS 300
65337 +#define RESERVED_PIDS 500
65338
65339 int pid_max_min = RESERVED_PIDS + 1;
65340 int pid_max_max = PID_MAX_LIMIT;
65341 @@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
65342 */
65343 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
65344 {
65345 + struct task_struct *task;
65346 +
65347 rcu_lockdep_assert(rcu_read_lock_held(),
65348 "find_task_by_pid_ns() needs rcu_read_lock()"
65349 " protection");
65350 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65351 +
65352 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65353 +
65354 + if (gr_pid_is_chrooted(task))
65355 + return NULL;
65356 +
65357 + return task;
65358 }
65359
65360 struct task_struct *find_task_by_vpid(pid_t vnr)
65361 @@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
65362 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
65363 }
65364
65365 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
65366 +{
65367 + rcu_lockdep_assert(rcu_read_lock_held(),
65368 + "find_task_by_pid_ns() needs rcu_read_lock()"
65369 + " protection");
65370 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
65371 +}
65372 +
65373 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
65374 {
65375 struct pid *pid;
65376 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
65377 index e7cb76d..75eceb3 100644
65378 --- a/kernel/posix-cpu-timers.c
65379 +++ b/kernel/posix-cpu-timers.c
65380 @@ -6,6 +6,7 @@
65381 #include <linux/posix-timers.h>
65382 #include <linux/errno.h>
65383 #include <linux/math64.h>
65384 +#include <linux/security.h>
65385 #include <asm/uaccess.h>
65386 #include <linux/kernel_stat.h>
65387 #include <trace/events/timer.h>
65388 @@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
65389
65390 static __init int init_posix_cpu_timers(void)
65391 {
65392 - struct k_clock process = {
65393 + static struct k_clock process = {
65394 .clock_getres = process_cpu_clock_getres,
65395 .clock_get = process_cpu_clock_get,
65396 .timer_create = process_cpu_timer_create,
65397 .nsleep = process_cpu_nsleep,
65398 .nsleep_restart = process_cpu_nsleep_restart,
65399 };
65400 - struct k_clock thread = {
65401 + static struct k_clock thread = {
65402 .clock_getres = thread_cpu_clock_getres,
65403 .clock_get = thread_cpu_clock_get,
65404 .timer_create = thread_cpu_timer_create,
65405 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
65406 index 69185ae..cc2847a 100644
65407 --- a/kernel/posix-timers.c
65408 +++ b/kernel/posix-timers.c
65409 @@ -43,6 +43,7 @@
65410 #include <linux/idr.h>
65411 #include <linux/posix-clock.h>
65412 #include <linux/posix-timers.h>
65413 +#include <linux/grsecurity.h>
65414 #include <linux/syscalls.h>
65415 #include <linux/wait.h>
65416 #include <linux/workqueue.h>
65417 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
65418 * which we beg off on and pass to do_sys_settimeofday().
65419 */
65420
65421 -static struct k_clock posix_clocks[MAX_CLOCKS];
65422 +static struct k_clock *posix_clocks[MAX_CLOCKS];
65423
65424 /*
65425 * These ones are defined below.
65426 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
65427 */
65428 static __init int init_posix_timers(void)
65429 {
65430 - struct k_clock clock_realtime = {
65431 + static struct k_clock clock_realtime = {
65432 .clock_getres = hrtimer_get_res,
65433 .clock_get = posix_clock_realtime_get,
65434 .clock_set = posix_clock_realtime_set,
65435 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
65436 .timer_get = common_timer_get,
65437 .timer_del = common_timer_del,
65438 };
65439 - struct k_clock clock_monotonic = {
65440 + static struct k_clock clock_monotonic = {
65441 .clock_getres = hrtimer_get_res,
65442 .clock_get = posix_ktime_get_ts,
65443 .nsleep = common_nsleep,
65444 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
65445 .timer_get = common_timer_get,
65446 .timer_del = common_timer_del,
65447 };
65448 - struct k_clock clock_monotonic_raw = {
65449 + static struct k_clock clock_monotonic_raw = {
65450 .clock_getres = hrtimer_get_res,
65451 .clock_get = posix_get_monotonic_raw,
65452 };
65453 - struct k_clock clock_realtime_coarse = {
65454 + static struct k_clock clock_realtime_coarse = {
65455 .clock_getres = posix_get_coarse_res,
65456 .clock_get = posix_get_realtime_coarse,
65457 };
65458 - struct k_clock clock_monotonic_coarse = {
65459 + static struct k_clock clock_monotonic_coarse = {
65460 .clock_getres = posix_get_coarse_res,
65461 .clock_get = posix_get_monotonic_coarse,
65462 };
65463 - struct k_clock clock_boottime = {
65464 + static struct k_clock clock_boottime = {
65465 .clock_getres = hrtimer_get_res,
65466 .clock_get = posix_get_boottime,
65467 .nsleep = common_nsleep,
65468 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
65469 return;
65470 }
65471
65472 - posix_clocks[clock_id] = *new_clock;
65473 + posix_clocks[clock_id] = new_clock;
65474 }
65475 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
65476
65477 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
65478 return (id & CLOCKFD_MASK) == CLOCKFD ?
65479 &clock_posix_dynamic : &clock_posix_cpu;
65480
65481 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
65482 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
65483 return NULL;
65484 - return &posix_clocks[id];
65485 + return posix_clocks[id];
65486 }
65487
65488 static int common_timer_create(struct k_itimer *new_timer)
65489 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
65490 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
65491 return -EFAULT;
65492
65493 + /* only the CLOCK_REALTIME clock can be set, all other clocks
65494 + have their clock_set fptr set to a nosettime dummy function
65495 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
65496 + call common_clock_set, which calls do_sys_settimeofday, which
65497 + we hook
65498 + */
65499 +
65500 return kc->clock_set(which_clock, &new_tp);
65501 }
65502
65503 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
65504 index d523593..68197a4 100644
65505 --- a/kernel/power/poweroff.c
65506 +++ b/kernel/power/poweroff.c
65507 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
65508 .enable_mask = SYSRQ_ENABLE_BOOT,
65509 };
65510
65511 -static int pm_sysrq_init(void)
65512 +static int __init pm_sysrq_init(void)
65513 {
65514 register_sysrq_key('o', &sysrq_poweroff_op);
65515 return 0;
65516 diff --git a/kernel/power/process.c b/kernel/power/process.c
65517 index 3d4b954..11af930 100644
65518 --- a/kernel/power/process.c
65519 +++ b/kernel/power/process.c
65520 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
65521 u64 elapsed_csecs64;
65522 unsigned int elapsed_csecs;
65523 bool wakeup = false;
65524 + bool timedout = false;
65525
65526 do_gettimeofday(&start);
65527
65528 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
65529
65530 while (true) {
65531 todo = 0;
65532 + if (time_after(jiffies, end_time))
65533 + timedout = true;
65534 read_lock(&tasklist_lock);
65535 do_each_thread(g, p) {
65536 if (frozen(p) || !freezable(p))
65537 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
65538 * try_to_stop() after schedule() in ptrace/signal
65539 * stop sees TIF_FREEZE.
65540 */
65541 - if (!task_is_stopped_or_traced(p) &&
65542 - !freezer_should_skip(p))
65543 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65544 todo++;
65545 + if (timedout) {
65546 + printk(KERN_ERR "Task refusing to freeze:\n");
65547 + sched_show_task(p);
65548 + }
65549 + }
65550 } while_each_thread(g, p);
65551 read_unlock(&tasklist_lock);
65552
65553 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
65554 todo += wq_busy;
65555 }
65556
65557 - if (!todo || time_after(jiffies, end_time))
65558 + if (!todo || timedout)
65559 break;
65560
65561 if (pm_wakeup_pending()) {
65562 diff --git a/kernel/printk.c b/kernel/printk.c
65563 index 7982a0a..2095fdc 100644
65564 --- a/kernel/printk.c
65565 +++ b/kernel/printk.c
65566 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
65567 if (from_file && type != SYSLOG_ACTION_OPEN)
65568 return 0;
65569
65570 +#ifdef CONFIG_GRKERNSEC_DMESG
65571 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
65572 + return -EPERM;
65573 +#endif
65574 +
65575 if (syslog_action_restricted(type)) {
65576 if (capable(CAP_SYSLOG))
65577 return 0;
65578 diff --git a/kernel/profile.c b/kernel/profile.c
65579 index 76b8e77..a2930e8 100644
65580 --- a/kernel/profile.c
65581 +++ b/kernel/profile.c
65582 @@ -39,7 +39,7 @@ struct profile_hit {
65583 /* Oprofile timer tick hook */
65584 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65585
65586 -static atomic_t *prof_buffer;
65587 +static atomic_unchecked_t *prof_buffer;
65588 static unsigned long prof_len, prof_shift;
65589
65590 int prof_on __read_mostly;
65591 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
65592 hits[i].pc = 0;
65593 continue;
65594 }
65595 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65596 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65597 hits[i].hits = hits[i].pc = 0;
65598 }
65599 }
65600 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65601 * Add the current hit(s) and flush the write-queue out
65602 * to the global buffer:
65603 */
65604 - atomic_add(nr_hits, &prof_buffer[pc]);
65605 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65606 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65607 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65608 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65609 hits[i].pc = hits[i].hits = 0;
65610 }
65611 out:
65612 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65613 {
65614 unsigned long pc;
65615 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65616 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65617 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65618 }
65619 #endif /* !CONFIG_SMP */
65620
65621 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
65622 return -EFAULT;
65623 buf++; p++; count--; read++;
65624 }
65625 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65626 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65627 if (copy_to_user(buf, (void *)pnt, count))
65628 return -EFAULT;
65629 read += count;
65630 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
65631 }
65632 #endif
65633 profile_discard_flip_buffers();
65634 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65635 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65636 return count;
65637 }
65638
65639 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
65640 index 78ab24a..332c915 100644
65641 --- a/kernel/ptrace.c
65642 +++ b/kernel/ptrace.c
65643 @@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
65644 return ret;
65645 }
65646
65647 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65648 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65649 + unsigned int log)
65650 {
65651 const struct cred *cred = current_cred(), *tcred;
65652
65653 @@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65654 cred->gid == tcred->sgid &&
65655 cred->gid == tcred->gid))
65656 goto ok;
65657 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
65658 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
65659 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
65660 goto ok;
65661 rcu_read_unlock();
65662 return -EPERM;
65663 @@ -207,7 +209,9 @@ ok:
65664 smp_rmb();
65665 if (task->mm)
65666 dumpable = get_dumpable(task->mm);
65667 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
65668 + if (!dumpable &&
65669 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
65670 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
65671 return -EPERM;
65672
65673 return security_ptrace_access_check(task, mode);
65674 @@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
65675 {
65676 int err;
65677 task_lock(task);
65678 - err = __ptrace_may_access(task, mode);
65679 + err = __ptrace_may_access(task, mode, 0);
65680 + task_unlock(task);
65681 + return !err;
65682 +}
65683 +
65684 +bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
65685 +{
65686 + return __ptrace_may_access(task, mode, 0);
65687 +}
65688 +
65689 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65690 +{
65691 + int err;
65692 + task_lock(task);
65693 + err = __ptrace_may_access(task, mode, 1);
65694 task_unlock(task);
65695 return !err;
65696 }
65697 @@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65698 goto out;
65699
65700 task_lock(task);
65701 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65702 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65703 task_unlock(task);
65704 if (retval)
65705 goto unlock_creds;
65706 @@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65707 task->ptrace = PT_PTRACED;
65708 if (seize)
65709 task->ptrace |= PT_SEIZED;
65710 - if (task_ns_capable(task, CAP_SYS_PTRACE))
65711 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65712 task->ptrace |= PT_PTRACE_CAP;
65713
65714 __ptrace_link(task, current);
65715 @@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
65716 break;
65717 return -EIO;
65718 }
65719 - if (copy_to_user(dst, buf, retval))
65720 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65721 return -EFAULT;
65722 copied += retval;
65723 src += retval;
65724 @@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request,
65725 bool seized = child->ptrace & PT_SEIZED;
65726 int ret = -EIO;
65727 siginfo_t siginfo, *si;
65728 - void __user *datavp = (void __user *) data;
65729 + void __user *datavp = (__force void __user *) data;
65730 unsigned long __user *datalp = datavp;
65731 unsigned long flags;
65732
65733 @@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
65734 goto out;
65735 }
65736
65737 + if (gr_handle_ptrace(child, request)) {
65738 + ret = -EPERM;
65739 + goto out_put_task_struct;
65740 + }
65741 +
65742 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65743 ret = ptrace_attach(child, request, data);
65744 /*
65745 * Some architectures need to do book-keeping after
65746 * a ptrace attach.
65747 */
65748 - if (!ret)
65749 + if (!ret) {
65750 arch_ptrace_attach(child);
65751 + gr_audit_ptrace(child);
65752 + }
65753 goto out_put_task_struct;
65754 }
65755
65756 @@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
65757 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65758 if (copied != sizeof(tmp))
65759 return -EIO;
65760 - return put_user(tmp, (unsigned long __user *)data);
65761 + return put_user(tmp, (__force unsigned long __user *)data);
65762 }
65763
65764 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65765 @@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
65766 goto out;
65767 }
65768
65769 + if (gr_handle_ptrace(child, request)) {
65770 + ret = -EPERM;
65771 + goto out_put_task_struct;
65772 + }
65773 +
65774 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65775 ret = ptrace_attach(child, request, data);
65776 /*
65777 * Some architectures need to do book-keeping after
65778 * a ptrace attach.
65779 */
65780 - if (!ret)
65781 + if (!ret) {
65782 arch_ptrace_attach(child);
65783 + gr_audit_ptrace(child);
65784 + }
65785 goto out_put_task_struct;
65786 }
65787
65788 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
65789 index 764825c..3aa6ac4 100644
65790 --- a/kernel/rcutorture.c
65791 +++ b/kernel/rcutorture.c
65792 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
65793 { 0 };
65794 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65795 { 0 };
65796 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65797 -static atomic_t n_rcu_torture_alloc;
65798 -static atomic_t n_rcu_torture_alloc_fail;
65799 -static atomic_t n_rcu_torture_free;
65800 -static atomic_t n_rcu_torture_mberror;
65801 -static atomic_t n_rcu_torture_error;
65802 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65803 +static atomic_unchecked_t n_rcu_torture_alloc;
65804 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
65805 +static atomic_unchecked_t n_rcu_torture_free;
65806 +static atomic_unchecked_t n_rcu_torture_mberror;
65807 +static atomic_unchecked_t n_rcu_torture_error;
65808 static long n_rcu_torture_boost_ktrerror;
65809 static long n_rcu_torture_boost_rterror;
65810 static long n_rcu_torture_boost_failure;
65811 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65812
65813 spin_lock_bh(&rcu_torture_lock);
65814 if (list_empty(&rcu_torture_freelist)) {
65815 - atomic_inc(&n_rcu_torture_alloc_fail);
65816 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65817 spin_unlock_bh(&rcu_torture_lock);
65818 return NULL;
65819 }
65820 - atomic_inc(&n_rcu_torture_alloc);
65821 + atomic_inc_unchecked(&n_rcu_torture_alloc);
65822 p = rcu_torture_freelist.next;
65823 list_del_init(p);
65824 spin_unlock_bh(&rcu_torture_lock);
65825 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65826 static void
65827 rcu_torture_free(struct rcu_torture *p)
65828 {
65829 - atomic_inc(&n_rcu_torture_free);
65830 + atomic_inc_unchecked(&n_rcu_torture_free);
65831 spin_lock_bh(&rcu_torture_lock);
65832 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65833 spin_unlock_bh(&rcu_torture_lock);
65834 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65835 i = rp->rtort_pipe_count;
65836 if (i > RCU_TORTURE_PIPE_LEN)
65837 i = RCU_TORTURE_PIPE_LEN;
65838 - atomic_inc(&rcu_torture_wcount[i]);
65839 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65840 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65841 rp->rtort_mbtest = 0;
65842 rcu_torture_free(rp);
65843 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
65844 i = rp->rtort_pipe_count;
65845 if (i > RCU_TORTURE_PIPE_LEN)
65846 i = RCU_TORTURE_PIPE_LEN;
65847 - atomic_inc(&rcu_torture_wcount[i]);
65848 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65849 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65850 rp->rtort_mbtest = 0;
65851 list_del(&rp->rtort_free);
65852 @@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
65853 i = old_rp->rtort_pipe_count;
65854 if (i > RCU_TORTURE_PIPE_LEN)
65855 i = RCU_TORTURE_PIPE_LEN;
65856 - atomic_inc(&rcu_torture_wcount[i]);
65857 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65858 old_rp->rtort_pipe_count++;
65859 cur_ops->deferred_free(old_rp);
65860 }
65861 @@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
65862 return;
65863 }
65864 if (p->rtort_mbtest == 0)
65865 - atomic_inc(&n_rcu_torture_mberror);
65866 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65867 spin_lock(&rand_lock);
65868 cur_ops->read_delay(&rand);
65869 n_rcu_torture_timers++;
65870 @@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
65871 continue;
65872 }
65873 if (p->rtort_mbtest == 0)
65874 - atomic_inc(&n_rcu_torture_mberror);
65875 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65876 cur_ops->read_delay(&rand);
65877 preempt_disable();
65878 pipe_count = p->rtort_pipe_count;
65879 @@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
65880 rcu_torture_current,
65881 rcu_torture_current_version,
65882 list_empty(&rcu_torture_freelist),
65883 - atomic_read(&n_rcu_torture_alloc),
65884 - atomic_read(&n_rcu_torture_alloc_fail),
65885 - atomic_read(&n_rcu_torture_free),
65886 - atomic_read(&n_rcu_torture_mberror),
65887 + atomic_read_unchecked(&n_rcu_torture_alloc),
65888 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65889 + atomic_read_unchecked(&n_rcu_torture_free),
65890 + atomic_read_unchecked(&n_rcu_torture_mberror),
65891 n_rcu_torture_boost_ktrerror,
65892 n_rcu_torture_boost_rterror,
65893 n_rcu_torture_boost_failure,
65894 n_rcu_torture_boosts,
65895 n_rcu_torture_timers);
65896 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65897 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65898 n_rcu_torture_boost_ktrerror != 0 ||
65899 n_rcu_torture_boost_rterror != 0 ||
65900 n_rcu_torture_boost_failure != 0)
65901 @@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
65902 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65903 if (i > 1) {
65904 cnt += sprintf(&page[cnt], "!!! ");
65905 - atomic_inc(&n_rcu_torture_error);
65906 + atomic_inc_unchecked(&n_rcu_torture_error);
65907 WARN_ON_ONCE(1);
65908 }
65909 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65910 @@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
65911 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65912 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65913 cnt += sprintf(&page[cnt], " %d",
65914 - atomic_read(&rcu_torture_wcount[i]));
65915 + atomic_read_unchecked(&rcu_torture_wcount[i]));
65916 }
65917 cnt += sprintf(&page[cnt], "\n");
65918 if (cur_ops->stats)
65919 @@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
65920
65921 if (cur_ops->cleanup)
65922 cur_ops->cleanup();
65923 - if (atomic_read(&n_rcu_torture_error))
65924 + if (atomic_read_unchecked(&n_rcu_torture_error))
65925 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65926 else
65927 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65928 @@ -1465,17 +1465,17 @@ rcu_torture_init(void)
65929
65930 rcu_torture_current = NULL;
65931 rcu_torture_current_version = 0;
65932 - atomic_set(&n_rcu_torture_alloc, 0);
65933 - atomic_set(&n_rcu_torture_alloc_fail, 0);
65934 - atomic_set(&n_rcu_torture_free, 0);
65935 - atomic_set(&n_rcu_torture_mberror, 0);
65936 - atomic_set(&n_rcu_torture_error, 0);
65937 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65938 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65939 + atomic_set_unchecked(&n_rcu_torture_free, 0);
65940 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65941 + atomic_set_unchecked(&n_rcu_torture_error, 0);
65942 n_rcu_torture_boost_ktrerror = 0;
65943 n_rcu_torture_boost_rterror = 0;
65944 n_rcu_torture_boost_failure = 0;
65945 n_rcu_torture_boosts = 0;
65946 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65947 - atomic_set(&rcu_torture_wcount[i], 0);
65948 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65949 for_each_possible_cpu(cpu) {
65950 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65951 per_cpu(rcu_torture_count, cpu)[i] = 0;
65952 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
65953 index 6b76d81..7afc1b3 100644
65954 --- a/kernel/rcutree.c
65955 +++ b/kernel/rcutree.c
65956 @@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
65957 trace_rcu_dyntick("Start");
65958 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65959 smp_mb__before_atomic_inc(); /* See above. */
65960 - atomic_inc(&rdtp->dynticks);
65961 + atomic_inc_unchecked(&rdtp->dynticks);
65962 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65963 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65964 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65965 local_irq_restore(flags);
65966 }
65967
65968 @@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
65969 return;
65970 }
65971 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65972 - atomic_inc(&rdtp->dynticks);
65973 + atomic_inc_unchecked(&rdtp->dynticks);
65974 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65975 smp_mb__after_atomic_inc(); /* See above. */
65976 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65977 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65978 trace_rcu_dyntick("End");
65979 local_irq_restore(flags);
65980 }
65981 @@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
65982 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65983
65984 if (rdtp->dynticks_nmi_nesting == 0 &&
65985 - (atomic_read(&rdtp->dynticks) & 0x1))
65986 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65987 return;
65988 rdtp->dynticks_nmi_nesting++;
65989 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65990 - atomic_inc(&rdtp->dynticks);
65991 + atomic_inc_unchecked(&rdtp->dynticks);
65992 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65993 smp_mb__after_atomic_inc(); /* See above. */
65994 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65995 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65996 }
65997
65998 /**
65999 @@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
66000 return;
66001 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
66002 smp_mb__before_atomic_inc(); /* See above. */
66003 - atomic_inc(&rdtp->dynticks);
66004 + atomic_inc_unchecked(&rdtp->dynticks);
66005 smp_mb__after_atomic_inc(); /* Force delay to next write. */
66006 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
66007 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
66008 }
66009
66010 /**
66011 @@ -474,7 +474,7 @@ void rcu_irq_exit(void)
66012 */
66013 static int dyntick_save_progress_counter(struct rcu_data *rdp)
66014 {
66015 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
66016 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
66017 return 0;
66018 }
66019
66020 @@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
66021 unsigned int curr;
66022 unsigned int snap;
66023
66024 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
66025 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
66026 snap = (unsigned int)rdp->dynticks_snap;
66027
66028 /*
66029 @@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
66030 /*
66031 * Do RCU core processing for the current CPU.
66032 */
66033 -static void rcu_process_callbacks(struct softirq_action *unused)
66034 +static void rcu_process_callbacks(void)
66035 {
66036 trace_rcu_utilization("Start RCU core");
66037 __rcu_process_callbacks(&rcu_sched_state,
66038 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
66039 index 849ce9e..74bc9de 100644
66040 --- a/kernel/rcutree.h
66041 +++ b/kernel/rcutree.h
66042 @@ -86,7 +86,7 @@
66043 struct rcu_dynticks {
66044 int dynticks_nesting; /* Track irq/process nesting level. */
66045 int dynticks_nmi_nesting; /* Track NMI nesting level. */
66046 - atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
66047 + atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
66048 };
66049
66050 /* RCU's kthread states for tracing. */
66051 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
66052 index 4b9b9f8..2326053 100644
66053 --- a/kernel/rcutree_plugin.h
66054 +++ b/kernel/rcutree_plugin.h
66055 @@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
66056
66057 /* Clean up and exit. */
66058 smp_mb(); /* ensure expedited GP seen before counter increment. */
66059 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
66060 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
66061 unlock_mb_ret:
66062 mutex_unlock(&sync_rcu_preempt_exp_mutex);
66063 mb_ret:
66064 @@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
66065
66066 #else /* #ifndef CONFIG_SMP */
66067
66068 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
66069 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
66070 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
66071 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
66072
66073 static int synchronize_sched_expedited_cpu_stop(void *data)
66074 {
66075 @@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
66076 int firstsnap, s, snap, trycount = 0;
66077
66078 /* Note that atomic_inc_return() implies full memory barrier. */
66079 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
66080 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
66081 get_online_cpus();
66082
66083 /*
66084 @@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
66085 }
66086
66087 /* Check to see if someone else did our work for us. */
66088 - s = atomic_read(&sync_sched_expedited_done);
66089 + s = atomic_read_unchecked(&sync_sched_expedited_done);
66090 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
66091 smp_mb(); /* ensure test happens before caller kfree */
66092 return;
66093 @@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
66094 * grace period works for us.
66095 */
66096 get_online_cpus();
66097 - snap = atomic_read(&sync_sched_expedited_started) - 1;
66098 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
66099 smp_mb(); /* ensure read is before try_stop_cpus(). */
66100 }
66101
66102 @@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
66103 * than we did beat us to the punch.
66104 */
66105 do {
66106 - s = atomic_read(&sync_sched_expedited_done);
66107 + s = atomic_read_unchecked(&sync_sched_expedited_done);
66108 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
66109 smp_mb(); /* ensure test happens before caller kfree */
66110 break;
66111 }
66112 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
66113 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
66114
66115 put_online_cpus();
66116 }
66117 @@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
66118 for_each_online_cpu(thatcpu) {
66119 if (thatcpu == cpu)
66120 continue;
66121 - snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
66122 + snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
66123 thatcpu).dynticks);
66124 smp_mb(); /* Order sampling of snap with end of grace period. */
66125 if ((snap & 0x1) != 0) {
66126 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
66127 index 9feffa4..54058df 100644
66128 --- a/kernel/rcutree_trace.c
66129 +++ b/kernel/rcutree_trace.c
66130 @@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
66131 rdp->qs_pending);
66132 #ifdef CONFIG_NO_HZ
66133 seq_printf(m, " dt=%d/%d/%d df=%lu",
66134 - atomic_read(&rdp->dynticks->dynticks),
66135 + atomic_read_unchecked(&rdp->dynticks->dynticks),
66136 rdp->dynticks->dynticks_nesting,
66137 rdp->dynticks->dynticks_nmi_nesting,
66138 rdp->dynticks_fqs);
66139 @@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
66140 rdp->qs_pending);
66141 #ifdef CONFIG_NO_HZ
66142 seq_printf(m, ",%d,%d,%d,%lu",
66143 - atomic_read(&rdp->dynticks->dynticks),
66144 + atomic_read_unchecked(&rdp->dynticks->dynticks),
66145 rdp->dynticks->dynticks_nesting,
66146 rdp->dynticks->dynticks_nmi_nesting,
66147 rdp->dynticks_fqs);
66148 diff --git a/kernel/resource.c b/kernel/resource.c
66149 index 7640b3a..5879283 100644
66150 --- a/kernel/resource.c
66151 +++ b/kernel/resource.c
66152 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
66153
66154 static int __init ioresources_init(void)
66155 {
66156 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66157 +#ifdef CONFIG_GRKERNSEC_PROC_USER
66158 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
66159 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
66160 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66161 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
66162 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
66163 +#endif
66164 +#else
66165 proc_create("ioports", 0, NULL, &proc_ioports_operations);
66166 proc_create("iomem", 0, NULL, &proc_iomem_operations);
66167 +#endif
66168 return 0;
66169 }
66170 __initcall(ioresources_init);
66171 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
66172 index 3d9f31c..7fefc9e 100644
66173 --- a/kernel/rtmutex-tester.c
66174 +++ b/kernel/rtmutex-tester.c
66175 @@ -20,7 +20,7 @@
66176 #define MAX_RT_TEST_MUTEXES 8
66177
66178 static spinlock_t rttest_lock;
66179 -static atomic_t rttest_event;
66180 +static atomic_unchecked_t rttest_event;
66181
66182 struct test_thread_data {
66183 int opcode;
66184 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66185
66186 case RTTEST_LOCKCONT:
66187 td->mutexes[td->opdata] = 1;
66188 - td->event = atomic_add_return(1, &rttest_event);
66189 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66190 return 0;
66191
66192 case RTTEST_RESET:
66193 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66194 return 0;
66195
66196 case RTTEST_RESETEVENT:
66197 - atomic_set(&rttest_event, 0);
66198 + atomic_set_unchecked(&rttest_event, 0);
66199 return 0;
66200
66201 default:
66202 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66203 return ret;
66204
66205 td->mutexes[id] = 1;
66206 - td->event = atomic_add_return(1, &rttest_event);
66207 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66208 rt_mutex_lock(&mutexes[id]);
66209 - td->event = atomic_add_return(1, &rttest_event);
66210 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66211 td->mutexes[id] = 4;
66212 return 0;
66213
66214 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66215 return ret;
66216
66217 td->mutexes[id] = 1;
66218 - td->event = atomic_add_return(1, &rttest_event);
66219 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66220 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
66221 - td->event = atomic_add_return(1, &rttest_event);
66222 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66223 td->mutexes[id] = ret ? 0 : 4;
66224 return ret ? -EINTR : 0;
66225
66226 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66227 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
66228 return ret;
66229
66230 - td->event = atomic_add_return(1, &rttest_event);
66231 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66232 rt_mutex_unlock(&mutexes[id]);
66233 - td->event = atomic_add_return(1, &rttest_event);
66234 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66235 td->mutexes[id] = 0;
66236 return 0;
66237
66238 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
66239 break;
66240
66241 td->mutexes[dat] = 2;
66242 - td->event = atomic_add_return(1, &rttest_event);
66243 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66244 break;
66245
66246 default:
66247 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
66248 return;
66249
66250 td->mutexes[dat] = 3;
66251 - td->event = atomic_add_return(1, &rttest_event);
66252 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66253 break;
66254
66255 case RTTEST_LOCKNOWAIT:
66256 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
66257 return;
66258
66259 td->mutexes[dat] = 1;
66260 - td->event = atomic_add_return(1, &rttest_event);
66261 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66262 return;
66263
66264 default:
66265 diff --git a/kernel/sched.c b/kernel/sched.c
66266 index d6b149c..896cbb8 100644
66267 --- a/kernel/sched.c
66268 +++ b/kernel/sched.c
66269 @@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
66270 BUG(); /* the idle class will always have a runnable task */
66271 }
66272
66273 +#ifdef CONFIG_GRKERNSEC_SETXID
66274 +extern void gr_delayed_cred_worker(void);
66275 +static inline void gr_cred_schedule(void)
66276 +{
66277 + if (unlikely(current->delayed_cred))
66278 + gr_delayed_cred_worker();
66279 +}
66280 +#else
66281 +static inline void gr_cred_schedule(void)
66282 +{
66283 +}
66284 +#endif
66285 +
66286 /*
66287 * __schedule() is the main scheduler function.
66288 */
66289 @@ -4408,6 +4421,8 @@ need_resched:
66290
66291 schedule_debug(prev);
66292
66293 + gr_cred_schedule();
66294 +
66295 if (sched_feat(HRTICK))
66296 hrtick_clear(rq);
66297
66298 @@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
66299 /* convert nice value [19,-20] to rlimit style value [1,40] */
66300 int nice_rlim = 20 - nice;
66301
66302 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
66303 +
66304 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
66305 capable(CAP_SYS_NICE));
66306 }
66307 @@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
66308 if (nice > 19)
66309 nice = 19;
66310
66311 - if (increment < 0 && !can_nice(current, nice))
66312 + if (increment < 0 && (!can_nice(current, nice) ||
66313 + gr_handle_chroot_nice()))
66314 return -EPERM;
66315
66316 retval = security_task_setnice(current, nice);
66317 @@ -5288,6 +5306,7 @@ recheck:
66318 unsigned long rlim_rtprio =
66319 task_rlimit(p, RLIMIT_RTPRIO);
66320
66321 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
66322 /* can't set/change the rt policy */
66323 if (policy != p->policy && !rlim_rtprio)
66324 return -EPERM;
66325 diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
66326 index 429242f..d7cca82 100644
66327 --- a/kernel/sched_autogroup.c
66328 +++ b/kernel/sched_autogroup.c
66329 @@ -7,7 +7,7 @@
66330
66331 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
66332 static struct autogroup autogroup_default;
66333 -static atomic_t autogroup_seq_nr;
66334 +static atomic_unchecked_t autogroup_seq_nr;
66335
66336 static void __init autogroup_init(struct task_struct *init_task)
66337 {
66338 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
66339
66340 kref_init(&ag->kref);
66341 init_rwsem(&ag->lock);
66342 - ag->id = atomic_inc_return(&autogroup_seq_nr);
66343 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
66344 ag->tg = tg;
66345 #ifdef CONFIG_RT_GROUP_SCHED
66346 /*
66347 diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
66348 index 8a39fa3..34f3dbc 100644
66349 --- a/kernel/sched_fair.c
66350 +++ b/kernel/sched_fair.c
66351 @@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
66352 * run_rebalance_domains is triggered when needed from the scheduler tick.
66353 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
66354 */
66355 -static void run_rebalance_domains(struct softirq_action *h)
66356 +static void run_rebalance_domains(void)
66357 {
66358 int this_cpu = smp_processor_id();
66359 struct rq *this_rq = cpu_rq(this_cpu);
66360 diff --git a/kernel/signal.c b/kernel/signal.c
66361 index 2065515..aed2987 100644
66362 --- a/kernel/signal.c
66363 +++ b/kernel/signal.c
66364 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
66365
66366 int print_fatal_signals __read_mostly;
66367
66368 -static void __user *sig_handler(struct task_struct *t, int sig)
66369 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
66370 {
66371 return t->sighand->action[sig - 1].sa.sa_handler;
66372 }
66373
66374 -static int sig_handler_ignored(void __user *handler, int sig)
66375 +static int sig_handler_ignored(__sighandler_t handler, int sig)
66376 {
66377 /* Is it explicitly or implicitly ignored? */
66378 return handler == SIG_IGN ||
66379 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
66380 static int sig_task_ignored(struct task_struct *t, int sig,
66381 int from_ancestor_ns)
66382 {
66383 - void __user *handler;
66384 + __sighandler_t handler;
66385
66386 handler = sig_handler(t, sig);
66387
66388 @@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
66389 atomic_inc(&user->sigpending);
66390 rcu_read_unlock();
66391
66392 + if (!override_rlimit)
66393 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
66394 +
66395 if (override_rlimit ||
66396 atomic_read(&user->sigpending) <=
66397 task_rlimit(t, RLIMIT_SIGPENDING)) {
66398 @@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
66399
66400 int unhandled_signal(struct task_struct *tsk, int sig)
66401 {
66402 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
66403 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
66404 if (is_global_init(tsk))
66405 return 1;
66406 if (handler != SIG_IGN && handler != SIG_DFL)
66407 @@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
66408 }
66409 }
66410
66411 + /* allow glibc communication via tgkill to other threads in our
66412 + thread group */
66413 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
66414 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
66415 + && gr_handle_signal(t, sig))
66416 + return -EPERM;
66417 +
66418 return security_task_kill(t, info, sig, 0);
66419 }
66420
66421 @@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66422 return send_signal(sig, info, p, 1);
66423 }
66424
66425 -static int
66426 +int
66427 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66428 {
66429 return send_signal(sig, info, t, 0);
66430 @@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66431 unsigned long int flags;
66432 int ret, blocked, ignored;
66433 struct k_sigaction *action;
66434 + int is_unhandled = 0;
66435
66436 spin_lock_irqsave(&t->sighand->siglock, flags);
66437 action = &t->sighand->action[sig-1];
66438 @@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66439 }
66440 if (action->sa.sa_handler == SIG_DFL)
66441 t->signal->flags &= ~SIGNAL_UNKILLABLE;
66442 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
66443 + is_unhandled = 1;
66444 ret = specific_send_sig_info(sig, info, t);
66445 spin_unlock_irqrestore(&t->sighand->siglock, flags);
66446
66447 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
66448 + normal operation */
66449 + if (is_unhandled) {
66450 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
66451 + gr_handle_crash(t, sig);
66452 + }
66453 +
66454 return ret;
66455 }
66456
66457 @@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66458 ret = check_kill_permission(sig, info, p);
66459 rcu_read_unlock();
66460
66461 - if (!ret && sig)
66462 + if (!ret && sig) {
66463 ret = do_send_sig_info(sig, info, p, true);
66464 + if (!ret)
66465 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
66466 + }
66467
66468 return ret;
66469 }
66470 @@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
66471 int error = -ESRCH;
66472
66473 rcu_read_lock();
66474 - p = find_task_by_vpid(pid);
66475 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66476 + /* allow glibc communication via tgkill to other threads in our
66477 + thread group */
66478 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
66479 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
66480 + p = find_task_by_vpid_unrestricted(pid);
66481 + else
66482 +#endif
66483 + p = find_task_by_vpid(pid);
66484 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
66485 error = check_kill_permission(sig, info, p);
66486 /*
66487 diff --git a/kernel/smp.c b/kernel/smp.c
66488 index db197d6..17aef0b 100644
66489 --- a/kernel/smp.c
66490 +++ b/kernel/smp.c
66491 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
66492 }
66493 EXPORT_SYMBOL(smp_call_function);
66494
66495 -void ipi_call_lock(void)
66496 +void ipi_call_lock(void) __acquires(call_function.lock)
66497 {
66498 raw_spin_lock(&call_function.lock);
66499 }
66500
66501 -void ipi_call_unlock(void)
66502 +void ipi_call_unlock(void) __releases(call_function.lock)
66503 {
66504 raw_spin_unlock(&call_function.lock);
66505 }
66506
66507 -void ipi_call_lock_irq(void)
66508 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
66509 {
66510 raw_spin_lock_irq(&call_function.lock);
66511 }
66512
66513 -void ipi_call_unlock_irq(void)
66514 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
66515 {
66516 raw_spin_unlock_irq(&call_function.lock);
66517 }
66518 diff --git a/kernel/softirq.c b/kernel/softirq.c
66519 index 2c71d91..1021f81 100644
66520 --- a/kernel/softirq.c
66521 +++ b/kernel/softirq.c
66522 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
66523
66524 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
66525
66526 -char *softirq_to_name[NR_SOFTIRQS] = {
66527 +const char * const softirq_to_name[NR_SOFTIRQS] = {
66528 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
66529 "TASKLET", "SCHED", "HRTIMER", "RCU"
66530 };
66531 @@ -235,7 +235,7 @@ restart:
66532 kstat_incr_softirqs_this_cpu(vec_nr);
66533
66534 trace_softirq_entry(vec_nr);
66535 - h->action(h);
66536 + h->action();
66537 trace_softirq_exit(vec_nr);
66538 if (unlikely(prev_count != preempt_count())) {
66539 printk(KERN_ERR "huh, entered softirq %u %s %p"
66540 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
66541 local_irq_restore(flags);
66542 }
66543
66544 -void open_softirq(int nr, void (*action)(struct softirq_action *))
66545 +void open_softirq(int nr, void (*action)(void))
66546 {
66547 - softirq_vec[nr].action = action;
66548 + pax_open_kernel();
66549 + *(void **)&softirq_vec[nr].action = action;
66550 + pax_close_kernel();
66551 }
66552
66553 /*
66554 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
66555
66556 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
66557
66558 -static void tasklet_action(struct softirq_action *a)
66559 +static void tasklet_action(void)
66560 {
66561 struct tasklet_struct *list;
66562
66563 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
66564 }
66565 }
66566
66567 -static void tasklet_hi_action(struct softirq_action *a)
66568 +static void tasklet_hi_action(void)
66569 {
66570 struct tasklet_struct *list;
66571
66572 diff --git a/kernel/sys.c b/kernel/sys.c
66573 index 481611f..0754d86 100644
66574 --- a/kernel/sys.c
66575 +++ b/kernel/sys.c
66576 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
66577 error = -EACCES;
66578 goto out;
66579 }
66580 +
66581 + if (gr_handle_chroot_setpriority(p, niceval)) {
66582 + error = -EACCES;
66583 + goto out;
66584 + }
66585 +
66586 no_nice = security_task_setnice(p, niceval);
66587 if (no_nice) {
66588 error = no_nice;
66589 @@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
66590 goto error;
66591 }
66592
66593 + if (gr_check_group_change(new->gid, new->egid, -1))
66594 + goto error;
66595 +
66596 if (rgid != (gid_t) -1 ||
66597 (egid != (gid_t) -1 && egid != old->gid))
66598 new->sgid = new->egid;
66599 @@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66600 old = current_cred();
66601
66602 retval = -EPERM;
66603 +
66604 + if (gr_check_group_change(gid, gid, gid))
66605 + goto error;
66606 +
66607 if (nsown_capable(CAP_SETGID))
66608 new->gid = new->egid = new->sgid = new->fsgid = gid;
66609 else if (gid == old->gid || gid == old->sgid)
66610 @@ -618,7 +631,7 @@ error:
66611 /*
66612 * change the user struct in a credentials set to match the new UID
66613 */
66614 -static int set_user(struct cred *new)
66615 +int set_user(struct cred *new)
66616 {
66617 struct user_struct *new_user;
66618
66619 @@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
66620 goto error;
66621 }
66622
66623 + if (gr_check_user_change(new->uid, new->euid, -1))
66624 + goto error;
66625 +
66626 if (new->uid != old->uid) {
66627 retval = set_user(new);
66628 if (retval < 0)
66629 @@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66630 old = current_cred();
66631
66632 retval = -EPERM;
66633 +
66634 + if (gr_check_crash_uid(uid))
66635 + goto error;
66636 + if (gr_check_user_change(uid, uid, uid))
66637 + goto error;
66638 +
66639 if (nsown_capable(CAP_SETUID)) {
66640 new->suid = new->uid = uid;
66641 if (uid != old->uid) {
66642 @@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
66643 goto error;
66644 }
66645
66646 + if (gr_check_user_change(ruid, euid, -1))
66647 + goto error;
66648 +
66649 if (ruid != (uid_t) -1) {
66650 new->uid = ruid;
66651 if (ruid != old->uid) {
66652 @@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
66653 goto error;
66654 }
66655
66656 + if (gr_check_group_change(rgid, egid, -1))
66657 + goto error;
66658 +
66659 if (rgid != (gid_t) -1)
66660 new->gid = rgid;
66661 if (egid != (gid_t) -1)
66662 @@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66663 old = current_cred();
66664 old_fsuid = old->fsuid;
66665
66666 + if (gr_check_user_change(-1, -1, uid))
66667 + goto error;
66668 +
66669 if (uid == old->uid || uid == old->euid ||
66670 uid == old->suid || uid == old->fsuid ||
66671 nsown_capable(CAP_SETUID)) {
66672 @@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66673 }
66674 }
66675
66676 +error:
66677 abort_creds(new);
66678 return old_fsuid;
66679
66680 @@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66681 if (gid == old->gid || gid == old->egid ||
66682 gid == old->sgid || gid == old->fsgid ||
66683 nsown_capable(CAP_SETGID)) {
66684 + if (gr_check_group_change(-1, -1, gid))
66685 + goto error;
66686 +
66687 if (gid != old_fsgid) {
66688 new->fsgid = gid;
66689 goto change_okay;
66690 }
66691 }
66692
66693 +error:
66694 abort_creds(new);
66695 return old_fsgid;
66696
66697 @@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
66698 }
66699 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
66700 snprintf(buf, len, "2.6.%u%s", v, rest);
66701 - ret = copy_to_user(release, buf, len);
66702 + if (len > sizeof(buf))
66703 + ret = -EFAULT;
66704 + else
66705 + ret = copy_to_user(release, buf, len);
66706 }
66707 return ret;
66708 }
66709 @@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
66710 return -EFAULT;
66711
66712 down_read(&uts_sem);
66713 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
66714 + error = __copy_to_user(name->sysname, &utsname()->sysname,
66715 __OLD_UTS_LEN);
66716 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
66717 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
66718 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
66719 __OLD_UTS_LEN);
66720 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
66721 - error |= __copy_to_user(&name->release, &utsname()->release,
66722 + error |= __copy_to_user(name->release, &utsname()->release,
66723 __OLD_UTS_LEN);
66724 error |= __put_user(0, name->release + __OLD_UTS_LEN);
66725 - error |= __copy_to_user(&name->version, &utsname()->version,
66726 + error |= __copy_to_user(name->version, &utsname()->version,
66727 __OLD_UTS_LEN);
66728 error |= __put_user(0, name->version + __OLD_UTS_LEN);
66729 - error |= __copy_to_user(&name->machine, &utsname()->machine,
66730 + error |= __copy_to_user(name->machine, &utsname()->machine,
66731 __OLD_UTS_LEN);
66732 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66733 up_read(&uts_sem);
66734 @@ -1720,7 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
66735 error = get_dumpable(me->mm);
66736 break;
66737 case PR_SET_DUMPABLE:
66738 - if (arg2 < 0 || arg2 > 1) {
66739 + if (arg2 > 1) {
66740 error = -EINVAL;
66741 break;
66742 }
66743 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
66744 index ae27196..7506d69 100644
66745 --- a/kernel/sysctl.c
66746 +++ b/kernel/sysctl.c
66747 @@ -86,6 +86,13 @@
66748
66749
66750 #if defined(CONFIG_SYSCTL)
66751 +#include <linux/grsecurity.h>
66752 +#include <linux/grinternal.h>
66753 +
66754 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66755 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66756 + const int op);
66757 +extern int gr_handle_chroot_sysctl(const int op);
66758
66759 /* External variables not in a header file. */
66760 extern int sysctl_overcommit_memory;
66761 @@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
66762 }
66763
66764 #endif
66765 +extern struct ctl_table grsecurity_table[];
66766
66767 static struct ctl_table root_table[];
66768 static struct ctl_table_root sysctl_table_root;
66769 @@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
66770 int sysctl_legacy_va_layout;
66771 #endif
66772
66773 +#ifdef CONFIG_PAX_SOFTMODE
66774 +static ctl_table pax_table[] = {
66775 + {
66776 + .procname = "softmode",
66777 + .data = &pax_softmode,
66778 + .maxlen = sizeof(unsigned int),
66779 + .mode = 0600,
66780 + .proc_handler = &proc_dointvec,
66781 + },
66782 +
66783 + { }
66784 +};
66785 +#endif
66786 +
66787 /* The default sysctl tables: */
66788
66789 static struct ctl_table root_table[] = {
66790 @@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
66791 #endif
66792
66793 static struct ctl_table kern_table[] = {
66794 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66795 + {
66796 + .procname = "grsecurity",
66797 + .mode = 0500,
66798 + .child = grsecurity_table,
66799 + },
66800 +#endif
66801 +
66802 +#ifdef CONFIG_PAX_SOFTMODE
66803 + {
66804 + .procname = "pax",
66805 + .mode = 0500,
66806 + .child = pax_table,
66807 + },
66808 +#endif
66809 +
66810 {
66811 .procname = "sched_child_runs_first",
66812 .data = &sysctl_sched_child_runs_first,
66813 @@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
66814 .data = &modprobe_path,
66815 .maxlen = KMOD_PATH_LEN,
66816 .mode = 0644,
66817 - .proc_handler = proc_dostring,
66818 + .proc_handler = proc_dostring_modpriv,
66819 },
66820 {
66821 .procname = "modules_disabled",
66822 @@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
66823 .extra1 = &zero,
66824 .extra2 = &one,
66825 },
66826 +#endif
66827 {
66828 .procname = "kptr_restrict",
66829 .data = &kptr_restrict,
66830 .maxlen = sizeof(int),
66831 .mode = 0644,
66832 .proc_handler = proc_dmesg_restrict,
66833 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66834 + .extra1 = &two,
66835 +#else
66836 .extra1 = &zero,
66837 +#endif
66838 .extra2 = &two,
66839 },
66840 -#endif
66841 {
66842 .procname = "ngroups_max",
66843 .data = &ngroups_max,
66844 @@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = {
66845 .proc_handler = proc_dointvec_minmax,
66846 .extra1 = &zero,
66847 },
66848 + {
66849 + .procname = "heap_stack_gap",
66850 + .data = &sysctl_heap_stack_gap,
66851 + .maxlen = sizeof(sysctl_heap_stack_gap),
66852 + .mode = 0644,
66853 + .proc_handler = proc_doulongvec_minmax,
66854 + },
66855 #else
66856 {
66857 .procname = "nr_trim_pages",
66858 @@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op)
66859 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66860 {
66861 int mode;
66862 + int error;
66863 +
66864 + if (table->parent != NULL && table->parent->procname != NULL &&
66865 + table->procname != NULL &&
66866 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66867 + return -EACCES;
66868 + if (gr_handle_chroot_sysctl(op))
66869 + return -EACCES;
66870 + error = gr_handle_sysctl(table, op);
66871 + if (error)
66872 + return error;
66873
66874 if (root->permissions)
66875 mode = root->permissions(root, current->nsproxy, table);
66876 @@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write,
66877 buffer, lenp, ppos);
66878 }
66879
66880 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66881 + void __user *buffer, size_t *lenp, loff_t *ppos)
66882 +{
66883 + if (write && !capable(CAP_SYS_MODULE))
66884 + return -EPERM;
66885 +
66886 + return _proc_do_string(table->data, table->maxlen, write,
66887 + buffer, lenp, ppos);
66888 +}
66889 +
66890 static size_t proc_skip_spaces(char **buf)
66891 {
66892 size_t ret;
66893 @@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
66894 len = strlen(tmp);
66895 if (len > *size)
66896 len = *size;
66897 + if (len > sizeof(tmp))
66898 + len = sizeof(tmp);
66899 if (copy_to_user(*buf, tmp, len))
66900 return -EFAULT;
66901 *size -= len;
66902 @@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
66903 *i = val;
66904 } else {
66905 val = convdiv * (*i) / convmul;
66906 - if (!first)
66907 + if (!first) {
66908 err = proc_put_char(&buffer, &left, '\t');
66909 + if (err)
66910 + break;
66911 + }
66912 err = proc_put_long(&buffer, &left, val, false);
66913 if (err)
66914 break;
66915 @@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write,
66916 return -ENOSYS;
66917 }
66918
66919 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66920 + void __user *buffer, size_t *lenp, loff_t *ppos)
66921 +{
66922 + return -ENOSYS;
66923 +}
66924 +
66925 int proc_dointvec(struct ctl_table *table, int write,
66926 void __user *buffer, size_t *lenp, loff_t *ppos)
66927 {
66928 @@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66929 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66930 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66931 EXPORT_SYMBOL(proc_dostring);
66932 +EXPORT_SYMBOL(proc_dostring_modpriv);
66933 EXPORT_SYMBOL(proc_doulongvec_minmax);
66934 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66935 EXPORT_SYMBOL(register_sysctl_table);
66936 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
66937 index a650694..aaeeb20 100644
66938 --- a/kernel/sysctl_binary.c
66939 +++ b/kernel/sysctl_binary.c
66940 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
66941 int i;
66942
66943 set_fs(KERNEL_DS);
66944 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66945 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66946 set_fs(old_fs);
66947 if (result < 0)
66948 goto out_kfree;
66949 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
66950 }
66951
66952 set_fs(KERNEL_DS);
66953 - result = vfs_write(file, buffer, str - buffer, &pos);
66954 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66955 set_fs(old_fs);
66956 if (result < 0)
66957 goto out_kfree;
66958 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
66959 int i;
66960
66961 set_fs(KERNEL_DS);
66962 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66963 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66964 set_fs(old_fs);
66965 if (result < 0)
66966 goto out_kfree;
66967 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
66968 }
66969
66970 set_fs(KERNEL_DS);
66971 - result = vfs_write(file, buffer, str - buffer, &pos);
66972 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66973 set_fs(old_fs);
66974 if (result < 0)
66975 goto out_kfree;
66976 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
66977 int i;
66978
66979 set_fs(KERNEL_DS);
66980 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66981 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66982 set_fs(old_fs);
66983 if (result < 0)
66984 goto out;
66985 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66986 __le16 dnaddr;
66987
66988 set_fs(KERNEL_DS);
66989 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66990 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66991 set_fs(old_fs);
66992 if (result < 0)
66993 goto out;
66994 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66995 le16_to_cpu(dnaddr) & 0x3ff);
66996
66997 set_fs(KERNEL_DS);
66998 - result = vfs_write(file, buf, len, &pos);
66999 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
67000 set_fs(old_fs);
67001 if (result < 0)
67002 goto out;
67003 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
67004 index 362da65..ab8ef8c 100644
67005 --- a/kernel/sysctl_check.c
67006 +++ b/kernel/sysctl_check.c
67007 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
67008 set_fail(&fail, table, "Directory with extra2");
67009 } else {
67010 if ((table->proc_handler == proc_dostring) ||
67011 + (table->proc_handler == proc_dostring_modpriv) ||
67012 (table->proc_handler == proc_dointvec) ||
67013 (table->proc_handler == proc_dointvec_minmax) ||
67014 (table->proc_handler == proc_dointvec_jiffies) ||
67015 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
67016 index e660464..c8b9e67 100644
67017 --- a/kernel/taskstats.c
67018 +++ b/kernel/taskstats.c
67019 @@ -27,9 +27,12 @@
67020 #include <linux/cgroup.h>
67021 #include <linux/fs.h>
67022 #include <linux/file.h>
67023 +#include <linux/grsecurity.h>
67024 #include <net/genetlink.h>
67025 #include <linux/atomic.h>
67026
67027 +extern int gr_is_taskstats_denied(int pid);
67028 +
67029 /*
67030 * Maximum length of a cpumask that can be specified in
67031 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
67032 @@ -556,6 +559,9 @@ err:
67033
67034 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
67035 {
67036 + if (gr_is_taskstats_denied(current->pid))
67037 + return -EACCES;
67038 +
67039 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
67040 return cmd_attr_register_cpumask(info);
67041 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
67042 diff --git a/kernel/time.c b/kernel/time.c
67043 index 73e416d..cfc6f69 100644
67044 --- a/kernel/time.c
67045 +++ b/kernel/time.c
67046 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
67047 return error;
67048
67049 if (tz) {
67050 + /* we log in do_settimeofday called below, so don't log twice
67051 + */
67052 + if (!tv)
67053 + gr_log_timechange();
67054 +
67055 /* SMP safe, global irq locking makes it work. */
67056 sys_tz = *tz;
67057 update_vsyscall_tz();
67058 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
67059 index 8a46f5d..bbe6f9c 100644
67060 --- a/kernel/time/alarmtimer.c
67061 +++ b/kernel/time/alarmtimer.c
67062 @@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
67063 struct platform_device *pdev;
67064 int error = 0;
67065 int i;
67066 - struct k_clock alarm_clock = {
67067 + static struct k_clock alarm_clock = {
67068 .clock_getres = alarm_clock_getres,
67069 .clock_get = alarm_clock_get,
67070 .timer_create = alarm_timer_create,
67071 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
67072 index fd4a7b1..fae5c2a 100644
67073 --- a/kernel/time/tick-broadcast.c
67074 +++ b/kernel/time/tick-broadcast.c
67075 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
67076 * then clear the broadcast bit.
67077 */
67078 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
67079 - int cpu = smp_processor_id();
67080 + cpu = smp_processor_id();
67081
67082 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
67083 tick_broadcast_clear_oneshot(cpu);
67084 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
67085 index 2378413..be455fd 100644
67086 --- a/kernel/time/timekeeping.c
67087 +++ b/kernel/time/timekeeping.c
67088 @@ -14,6 +14,7 @@
67089 #include <linux/init.h>
67090 #include <linux/mm.h>
67091 #include <linux/sched.h>
67092 +#include <linux/grsecurity.h>
67093 #include <linux/syscore_ops.h>
67094 #include <linux/clocksource.h>
67095 #include <linux/jiffies.h>
67096 @@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
67097 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
67098 return -EINVAL;
67099
67100 + gr_log_timechange();
67101 +
67102 write_seqlock_irqsave(&xtime_lock, flags);
67103
67104 timekeeping_forward_now();
67105 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
67106 index 3258455..f35227d 100644
67107 --- a/kernel/time/timer_list.c
67108 +++ b/kernel/time/timer_list.c
67109 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
67110
67111 static void print_name_offset(struct seq_file *m, void *sym)
67112 {
67113 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67114 + SEQ_printf(m, "<%p>", NULL);
67115 +#else
67116 char symname[KSYM_NAME_LEN];
67117
67118 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
67119 SEQ_printf(m, "<%pK>", sym);
67120 else
67121 SEQ_printf(m, "%s", symname);
67122 +#endif
67123 }
67124
67125 static void
67126 @@ -112,7 +116,11 @@ next_one:
67127 static void
67128 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
67129 {
67130 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67131 + SEQ_printf(m, " .base: %p\n", NULL);
67132 +#else
67133 SEQ_printf(m, " .base: %pK\n", base);
67134 +#endif
67135 SEQ_printf(m, " .index: %d\n",
67136 base->index);
67137 SEQ_printf(m, " .resolution: %Lu nsecs\n",
67138 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
67139 {
67140 struct proc_dir_entry *pe;
67141
67142 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67143 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
67144 +#else
67145 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
67146 +#endif
67147 if (!pe)
67148 return -ENOMEM;
67149 return 0;
67150 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
67151 index 0b537f2..9e71eca 100644
67152 --- a/kernel/time/timer_stats.c
67153 +++ b/kernel/time/timer_stats.c
67154 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
67155 static unsigned long nr_entries;
67156 static struct entry entries[MAX_ENTRIES];
67157
67158 -static atomic_t overflow_count;
67159 +static atomic_unchecked_t overflow_count;
67160
67161 /*
67162 * The entries are in a hash-table, for fast lookup:
67163 @@ -140,7 +140,7 @@ static void reset_entries(void)
67164 nr_entries = 0;
67165 memset(entries, 0, sizeof(entries));
67166 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
67167 - atomic_set(&overflow_count, 0);
67168 + atomic_set_unchecked(&overflow_count, 0);
67169 }
67170
67171 static struct entry *alloc_entry(void)
67172 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
67173 if (likely(entry))
67174 entry->count++;
67175 else
67176 - atomic_inc(&overflow_count);
67177 + atomic_inc_unchecked(&overflow_count);
67178
67179 out_unlock:
67180 raw_spin_unlock_irqrestore(lock, flags);
67181 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
67182
67183 static void print_name_offset(struct seq_file *m, unsigned long addr)
67184 {
67185 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67186 + seq_printf(m, "<%p>", NULL);
67187 +#else
67188 char symname[KSYM_NAME_LEN];
67189
67190 if (lookup_symbol_name(addr, symname) < 0)
67191 seq_printf(m, "<%p>", (void *)addr);
67192 else
67193 seq_printf(m, "%s", symname);
67194 +#endif
67195 }
67196
67197 static int tstats_show(struct seq_file *m, void *v)
67198 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
67199
67200 seq_puts(m, "Timer Stats Version: v0.2\n");
67201 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
67202 - if (atomic_read(&overflow_count))
67203 + if (atomic_read_unchecked(&overflow_count))
67204 seq_printf(m, "Overflow: %d entries\n",
67205 - atomic_read(&overflow_count));
67206 + atomic_read_unchecked(&overflow_count));
67207
67208 for (i = 0; i < nr_entries; i++) {
67209 entry = entries + i;
67210 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
67211 {
67212 struct proc_dir_entry *pe;
67213
67214 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67215 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
67216 +#else
67217 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
67218 +#endif
67219 if (!pe)
67220 return -ENOMEM;
67221 return 0;
67222 diff --git a/kernel/timer.c b/kernel/timer.c
67223 index 9c3c62b..441690e 100644
67224 --- a/kernel/timer.c
67225 +++ b/kernel/timer.c
67226 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
67227 /*
67228 * This function runs timers and the timer-tq in bottom half context.
67229 */
67230 -static void run_timer_softirq(struct softirq_action *h)
67231 +static void run_timer_softirq(void)
67232 {
67233 struct tvec_base *base = __this_cpu_read(tvec_bases);
67234
67235 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
67236 index 16fc34a..efd8bb8 100644
67237 --- a/kernel/trace/blktrace.c
67238 +++ b/kernel/trace/blktrace.c
67239 @@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
67240 struct blk_trace *bt = filp->private_data;
67241 char buf[16];
67242
67243 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
67244 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
67245
67246 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
67247 }
67248 @@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
67249 return 1;
67250
67251 bt = buf->chan->private_data;
67252 - atomic_inc(&bt->dropped);
67253 + atomic_inc_unchecked(&bt->dropped);
67254 return 0;
67255 }
67256
67257 @@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
67258
67259 bt->dir = dir;
67260 bt->dev = dev;
67261 - atomic_set(&bt->dropped, 0);
67262 + atomic_set_unchecked(&bt->dropped, 0);
67263
67264 ret = -EIO;
67265 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
67266 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
67267 index 25b4f4d..6f4772d 100644
67268 --- a/kernel/trace/ftrace.c
67269 +++ b/kernel/trace/ftrace.c
67270 @@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
67271 if (unlikely(ftrace_disabled))
67272 return 0;
67273
67274 + ret = ftrace_arch_code_modify_prepare();
67275 + FTRACE_WARN_ON(ret);
67276 + if (ret)
67277 + return 0;
67278 +
67279 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
67280 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
67281 if (ret) {
67282 ftrace_bug(ret, ip);
67283 - return 0;
67284 }
67285 - return 1;
67286 + return ret ? 0 : 1;
67287 }
67288
67289 /*
67290 @@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
67291
67292 int
67293 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
67294 - void *data)
67295 + void *data)
67296 {
67297 struct ftrace_func_probe *entry;
67298 struct ftrace_page *pg;
67299 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
67300 index f2bd275..adaf3a2 100644
67301 --- a/kernel/trace/trace.c
67302 +++ b/kernel/trace/trace.c
67303 @@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = {
67304 };
67305 #endif
67306
67307 -static struct dentry *d_tracer;
67308 -
67309 struct dentry *tracing_init_dentry(void)
67310 {
67311 + static struct dentry *d_tracer;
67312 static int once;
67313
67314 if (d_tracer)
67315 @@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void)
67316 return d_tracer;
67317 }
67318
67319 -static struct dentry *d_percpu;
67320 -
67321 struct dentry *tracing_dentry_percpu(void)
67322 {
67323 + static struct dentry *d_percpu;
67324 static int once;
67325 struct dentry *d_tracer;
67326
67327 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
67328 index c212a7f..7b02394 100644
67329 --- a/kernel/trace/trace_events.c
67330 +++ b/kernel/trace/trace_events.c
67331 @@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
67332 struct ftrace_module_file_ops {
67333 struct list_head list;
67334 struct module *mod;
67335 - struct file_operations id;
67336 - struct file_operations enable;
67337 - struct file_operations format;
67338 - struct file_operations filter;
67339 };
67340
67341 static struct ftrace_module_file_ops *
67342 @@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
67343
67344 file_ops->mod = mod;
67345
67346 - file_ops->id = ftrace_event_id_fops;
67347 - file_ops->id.owner = mod;
67348 -
67349 - file_ops->enable = ftrace_enable_fops;
67350 - file_ops->enable.owner = mod;
67351 -
67352 - file_ops->filter = ftrace_event_filter_fops;
67353 - file_ops->filter.owner = mod;
67354 -
67355 - file_ops->format = ftrace_event_format_fops;
67356 - file_ops->format.owner = mod;
67357 + pax_open_kernel();
67358 + *(void **)&mod->trace_id.owner = mod;
67359 + *(void **)&mod->trace_enable.owner = mod;
67360 + *(void **)&mod->trace_filter.owner = mod;
67361 + *(void **)&mod->trace_format.owner = mod;
67362 + pax_close_kernel();
67363
67364 list_add(&file_ops->list, &ftrace_module_file_list);
67365
67366 @@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
67367
67368 for_each_event(call, start, end) {
67369 __trace_add_event_call(*call, mod,
67370 - &file_ops->id, &file_ops->enable,
67371 - &file_ops->filter, &file_ops->format);
67372 + &mod->trace_id, &mod->trace_enable,
67373 + &mod->trace_filter, &mod->trace_format);
67374 }
67375 }
67376
67377 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
67378 index 00d527c..7c5b1a3 100644
67379 --- a/kernel/trace/trace_kprobe.c
67380 +++ b/kernel/trace/trace_kprobe.c
67381 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67382 long ret;
67383 int maxlen = get_rloc_len(*(u32 *)dest);
67384 u8 *dst = get_rloc_data(dest);
67385 - u8 *src = addr;
67386 + const u8 __user *src = (const u8 __force_user *)addr;
67387 mm_segment_t old_fs = get_fs();
67388 if (!maxlen)
67389 return;
67390 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67391 pagefault_disable();
67392 do
67393 ret = __copy_from_user_inatomic(dst++, src++, 1);
67394 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
67395 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
67396 dst[-1] = '\0';
67397 pagefault_enable();
67398 set_fs(old_fs);
67399 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67400 ((u8 *)get_rloc_data(dest))[0] = '\0';
67401 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
67402 } else
67403 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
67404 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
67405 get_rloc_offs(*(u32 *)dest));
67406 }
67407 /* Return the length of string -- including null terminal byte */
67408 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
67409 set_fs(KERNEL_DS);
67410 pagefault_disable();
67411 do {
67412 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
67413 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
67414 len++;
67415 } while (c && ret == 0 && len < MAX_STRING_SIZE);
67416 pagefault_enable();
67417 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
67418 index fd3c8aa..5f324a6 100644
67419 --- a/kernel/trace/trace_mmiotrace.c
67420 +++ b/kernel/trace/trace_mmiotrace.c
67421 @@ -24,7 +24,7 @@ struct header_iter {
67422 static struct trace_array *mmio_trace_array;
67423 static bool overrun_detected;
67424 static unsigned long prev_overruns;
67425 -static atomic_t dropped_count;
67426 +static atomic_unchecked_t dropped_count;
67427
67428 static void mmio_reset_data(struct trace_array *tr)
67429 {
67430 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
67431
67432 static unsigned long count_overruns(struct trace_iterator *iter)
67433 {
67434 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
67435 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
67436 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
67437
67438 if (over > prev_overruns)
67439 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
67440 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
67441 sizeof(*entry), 0, pc);
67442 if (!event) {
67443 - atomic_inc(&dropped_count);
67444 + atomic_inc_unchecked(&dropped_count);
67445 return;
67446 }
67447 entry = ring_buffer_event_data(event);
67448 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
67449 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
67450 sizeof(*entry), 0, pc);
67451 if (!event) {
67452 - atomic_inc(&dropped_count);
67453 + atomic_inc_unchecked(&dropped_count);
67454 return;
67455 }
67456 entry = ring_buffer_event_data(event);
67457 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
67458 index 5199930..26c73a0 100644
67459 --- a/kernel/trace/trace_output.c
67460 +++ b/kernel/trace/trace_output.c
67461 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
67462
67463 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
67464 if (!IS_ERR(p)) {
67465 - p = mangle_path(s->buffer + s->len, p, "\n");
67466 + p = mangle_path(s->buffer + s->len, p, "\n\\");
67467 if (p) {
67468 s->len = p - s->buffer;
67469 return 1;
67470 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
67471 index 77575b3..6e623d1 100644
67472 --- a/kernel/trace/trace_stack.c
67473 +++ b/kernel/trace/trace_stack.c
67474 @@ -50,7 +50,7 @@ static inline void check_stack(void)
67475 return;
67476
67477 /* we do not handle interrupt stacks yet */
67478 - if (!object_is_on_stack(&this_size))
67479 + if (!object_starts_on_stack(&this_size))
67480 return;
67481
67482 local_irq_save(flags);
67483 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
67484 index 209b379..7f76423 100644
67485 --- a/kernel/trace/trace_workqueue.c
67486 +++ b/kernel/trace/trace_workqueue.c
67487 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
67488 int cpu;
67489 pid_t pid;
67490 /* Can be inserted from interrupt or user context, need to be atomic */
67491 - atomic_t inserted;
67492 + atomic_unchecked_t inserted;
67493 /*
67494 * Don't need to be atomic, works are serialized in a single workqueue thread
67495 * on a single CPU.
67496 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
67497 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
67498 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
67499 if (node->pid == wq_thread->pid) {
67500 - atomic_inc(&node->inserted);
67501 + atomic_inc_unchecked(&node->inserted);
67502 goto found;
67503 }
67504 }
67505 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
67506 tsk = get_pid_task(pid, PIDTYPE_PID);
67507 if (tsk) {
67508 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
67509 - atomic_read(&cws->inserted), cws->executed,
67510 + atomic_read_unchecked(&cws->inserted), cws->executed,
67511 tsk->comm);
67512 put_task_struct(tsk);
67513 }
67514 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
67515 index 82928f5..92da771 100644
67516 --- a/lib/Kconfig.debug
67517 +++ b/lib/Kconfig.debug
67518 @@ -1103,6 +1103,7 @@ config LATENCYTOP
67519 depends on DEBUG_KERNEL
67520 depends on STACKTRACE_SUPPORT
67521 depends on PROC_FS
67522 + depends on !GRKERNSEC_HIDESYM
67523 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
67524 select KALLSYMS
67525 select KALLSYMS_ALL
67526 diff --git a/lib/bitmap.c b/lib/bitmap.c
67527 index 0d4a127..33a06c7 100644
67528 --- a/lib/bitmap.c
67529 +++ b/lib/bitmap.c
67530 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
67531 {
67532 int c, old_c, totaldigits, ndigits, nchunks, nbits;
67533 u32 chunk;
67534 - const char __user __force *ubuf = (const char __user __force *)buf;
67535 + const char __user *ubuf = (const char __force_user *)buf;
67536
67537 bitmap_zero(maskp, nmaskbits);
67538
67539 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
67540 {
67541 if (!access_ok(VERIFY_READ, ubuf, ulen))
67542 return -EFAULT;
67543 - return __bitmap_parse((const char __force *)ubuf,
67544 + return __bitmap_parse((const char __force_kernel *)ubuf,
67545 ulen, 1, maskp, nmaskbits);
67546
67547 }
67548 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
67549 {
67550 unsigned a, b;
67551 int c, old_c, totaldigits;
67552 - const char __user __force *ubuf = (const char __user __force *)buf;
67553 + const char __user *ubuf = (const char __force_user *)buf;
67554 int exp_digit, in_range;
67555
67556 totaldigits = c = 0;
67557 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
67558 {
67559 if (!access_ok(VERIFY_READ, ubuf, ulen))
67560 return -EFAULT;
67561 - return __bitmap_parselist((const char __force *)ubuf,
67562 + return __bitmap_parselist((const char __force_kernel *)ubuf,
67563 ulen, 1, maskp, nmaskbits);
67564 }
67565 EXPORT_SYMBOL(bitmap_parselist_user);
67566 diff --git a/lib/bug.c b/lib/bug.c
67567 index 1955209..cbbb2ad 100644
67568 --- a/lib/bug.c
67569 +++ b/lib/bug.c
67570 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
67571 return BUG_TRAP_TYPE_NONE;
67572
67573 bug = find_bug(bugaddr);
67574 + if (!bug)
67575 + return BUG_TRAP_TYPE_NONE;
67576
67577 file = NULL;
67578 line = 0;
67579 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
67580 index a78b7c6..2c73084 100644
67581 --- a/lib/debugobjects.c
67582 +++ b/lib/debugobjects.c
67583 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
67584 if (limit > 4)
67585 return;
67586
67587 - is_on_stack = object_is_on_stack(addr);
67588 + is_on_stack = object_starts_on_stack(addr);
67589 if (is_on_stack == onstack)
67590 return;
67591
67592 diff --git a/lib/devres.c b/lib/devres.c
67593 index 7c0e953..f642b5c 100644
67594 --- a/lib/devres.c
67595 +++ b/lib/devres.c
67596 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
67597 void devm_iounmap(struct device *dev, void __iomem *addr)
67598 {
67599 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
67600 - (void *)addr));
67601 + (void __force *)addr));
67602 iounmap(addr);
67603 }
67604 EXPORT_SYMBOL(devm_iounmap);
67605 @@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
67606 {
67607 ioport_unmap(addr);
67608 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
67609 - devm_ioport_map_match, (void *)addr));
67610 + devm_ioport_map_match, (void __force *)addr));
67611 }
67612 EXPORT_SYMBOL(devm_ioport_unmap);
67613
67614 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
67615 index fea790a..ebb0e82 100644
67616 --- a/lib/dma-debug.c
67617 +++ b/lib/dma-debug.c
67618 @@ -925,7 +925,7 @@ out:
67619
67620 static void check_for_stack(struct device *dev, void *addr)
67621 {
67622 - if (object_is_on_stack(addr))
67623 + if (object_starts_on_stack(addr))
67624 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67625 "stack [addr=%p]\n", addr);
67626 }
67627 diff --git a/lib/extable.c b/lib/extable.c
67628 index 4cac81e..63e9b8f 100644
67629 --- a/lib/extable.c
67630 +++ b/lib/extable.c
67631 @@ -13,6 +13,7 @@
67632 #include <linux/init.h>
67633 #include <linux/sort.h>
67634 #include <asm/uaccess.h>
67635 +#include <asm/pgtable.h>
67636
67637 #ifndef ARCH_HAS_SORT_EXTABLE
67638 /*
67639 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
67640 void sort_extable(struct exception_table_entry *start,
67641 struct exception_table_entry *finish)
67642 {
67643 + pax_open_kernel();
67644 sort(start, finish - start, sizeof(struct exception_table_entry),
67645 cmp_ex, NULL);
67646 + pax_close_kernel();
67647 }
67648
67649 #ifdef CONFIG_MODULES
67650 diff --git a/lib/inflate.c b/lib/inflate.c
67651 index 013a761..c28f3fc 100644
67652 --- a/lib/inflate.c
67653 +++ b/lib/inflate.c
67654 @@ -269,7 +269,7 @@ static void free(void *where)
67655 malloc_ptr = free_mem_ptr;
67656 }
67657 #else
67658 -#define malloc(a) kmalloc(a, GFP_KERNEL)
67659 +#define malloc(a) kmalloc((a), GFP_KERNEL)
67660 #define free(a) kfree(a)
67661 #endif
67662
67663 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
67664 index bd2bea9..6b3c95e 100644
67665 --- a/lib/is_single_threaded.c
67666 +++ b/lib/is_single_threaded.c
67667 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
67668 struct task_struct *p, *t;
67669 bool ret;
67670
67671 + if (!mm)
67672 + return true;
67673 +
67674 if (atomic_read(&task->signal->live) != 1)
67675 return false;
67676
67677 diff --git a/lib/kref.c b/lib/kref.c
67678 index 3efb882..8492f4c 100644
67679 --- a/lib/kref.c
67680 +++ b/lib/kref.c
67681 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
67682 */
67683 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67684 {
67685 - WARN_ON(release == NULL);
67686 + BUG_ON(release == NULL);
67687 WARN_ON(release == (void (*)(struct kref *))kfree);
67688
67689 if (atomic_dec_and_test(&kref->refcount)) {
67690 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
67691 index d9df745..e73c2fe 100644
67692 --- a/lib/radix-tree.c
67693 +++ b/lib/radix-tree.c
67694 @@ -80,7 +80,7 @@ struct radix_tree_preload {
67695 int nr;
67696 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67697 };
67698 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67699 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67700
67701 static inline void *ptr_to_indirect(void *ptr)
67702 {
67703 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
67704 index 993599e..84dc70e 100644
67705 --- a/lib/vsprintf.c
67706 +++ b/lib/vsprintf.c
67707 @@ -16,6 +16,9 @@
67708 * - scnprintf and vscnprintf
67709 */
67710
67711 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67712 +#define __INCLUDED_BY_HIDESYM 1
67713 +#endif
67714 #include <stdarg.h>
67715 #include <linux/module.h>
67716 #include <linux/types.h>
67717 @@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
67718 char sym[KSYM_SYMBOL_LEN];
67719 if (ext == 'B')
67720 sprint_backtrace(sym, value);
67721 - else if (ext != 'f' && ext != 's')
67722 + else if (ext != 'f' && ext != 's' && ext != 'a')
67723 sprint_symbol(sym, value);
67724 else
67725 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67726 @@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
67727 return string(buf, end, uuid, spec);
67728 }
67729
67730 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67731 +int kptr_restrict __read_mostly = 2;
67732 +#else
67733 int kptr_restrict __read_mostly;
67734 +#endif
67735
67736 /*
67737 * Show a '%p' thing. A kernel extension is that the '%p' is followed
67738 @@ -791,6 +798,8 @@ int kptr_restrict __read_mostly;
67739 * - 'S' For symbolic direct pointers with offset
67740 * - 's' For symbolic direct pointers without offset
67741 * - 'B' For backtraced symbolic direct pointers with offset
67742 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67743 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67744 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
67745 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
67746 * - 'M' For a 6-byte MAC address, it prints the address in the
67747 @@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67748 {
67749 if (!ptr && *fmt != 'K') {
67750 /*
67751 - * Print (null) with the same width as a pointer so it makes
67752 + * Print (nil) with the same width as a pointer so it makes
67753 * tabular output look nice.
67754 */
67755 if (spec.field_width == -1)
67756 spec.field_width = 2 * sizeof(void *);
67757 - return string(buf, end, "(null)", spec);
67758 + return string(buf, end, "(nil)", spec);
67759 }
67760
67761 switch (*fmt) {
67762 @@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67763 /* Fallthrough */
67764 case 'S':
67765 case 's':
67766 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67767 + break;
67768 +#else
67769 + return symbol_string(buf, end, ptr, spec, *fmt);
67770 +#endif
67771 + case 'A':
67772 + case 'a':
67773 case 'B':
67774 return symbol_string(buf, end, ptr, spec, *fmt);
67775 case 'R':
67776 @@ -1608,11 +1624,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67777 typeof(type) value; \
67778 if (sizeof(type) == 8) { \
67779 args = PTR_ALIGN(args, sizeof(u32)); \
67780 - *(u32 *)&value = *(u32 *)args; \
67781 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67782 + *(u32 *)&value = *(const u32 *)args; \
67783 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67784 } else { \
67785 args = PTR_ALIGN(args, sizeof(type)); \
67786 - value = *(typeof(type) *)args; \
67787 + value = *(const typeof(type) *)args; \
67788 } \
67789 args += sizeof(type); \
67790 value; \
67791 @@ -1675,7 +1691,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67792 case FORMAT_TYPE_STR: {
67793 const char *str_arg = args;
67794 args += strlen(str_arg) + 1;
67795 - str = string(str, end, (char *)str_arg, spec);
67796 + str = string(str, end, str_arg, spec);
67797 break;
67798 }
67799
67800 diff --git a/localversion-grsec b/localversion-grsec
67801 new file mode 100644
67802 index 0000000..7cd6065
67803 --- /dev/null
67804 +++ b/localversion-grsec
67805 @@ -0,0 +1 @@
67806 +-grsec
67807 diff --git a/mm/Kconfig b/mm/Kconfig
67808 index 011b110..b492af2 100644
67809 --- a/mm/Kconfig
67810 +++ b/mm/Kconfig
67811 @@ -241,10 +241,10 @@ config KSM
67812 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67813
67814 config DEFAULT_MMAP_MIN_ADDR
67815 - int "Low address space to protect from user allocation"
67816 + int "Low address space to protect from user allocation"
67817 depends on MMU
67818 - default 4096
67819 - help
67820 + default 65536
67821 + help
67822 This is the portion of low virtual memory which should be protected
67823 from userspace allocation. Keeping a user from writing to low pages
67824 can help reduce the impact of kernel NULL pointer bugs.
67825 diff --git a/mm/filemap.c b/mm/filemap.c
67826 index 03c5b0e..a01e793 100644
67827 --- a/mm/filemap.c
67828 +++ b/mm/filemap.c
67829 @@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
67830 struct address_space *mapping = file->f_mapping;
67831
67832 if (!mapping->a_ops->readpage)
67833 - return -ENOEXEC;
67834 + return -ENODEV;
67835 file_accessed(file);
67836 vma->vm_ops = &generic_file_vm_ops;
67837 vma->vm_flags |= VM_CAN_NONLINEAR;
67838 @@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
67839 *pos = i_size_read(inode);
67840
67841 if (limit != RLIM_INFINITY) {
67842 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67843 if (*pos >= limit) {
67844 send_sig(SIGXFSZ, current, 0);
67845 return -EFBIG;
67846 diff --git a/mm/fremap.c b/mm/fremap.c
67847 index 9ed4fd4..c42648d 100644
67848 --- a/mm/fremap.c
67849 +++ b/mm/fremap.c
67850 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
67851 retry:
67852 vma = find_vma(mm, start);
67853
67854 +#ifdef CONFIG_PAX_SEGMEXEC
67855 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67856 + goto out;
67857 +#endif
67858 +
67859 /*
67860 * Make sure the vma is shared, that it supports prefaulting,
67861 * and that the remapped range is valid and fully within
67862 diff --git a/mm/highmem.c b/mm/highmem.c
67863 index 57d82c6..e9e0552 100644
67864 --- a/mm/highmem.c
67865 +++ b/mm/highmem.c
67866 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67867 * So no dangers, even with speculative execution.
67868 */
67869 page = pte_page(pkmap_page_table[i]);
67870 + pax_open_kernel();
67871 pte_clear(&init_mm, (unsigned long)page_address(page),
67872 &pkmap_page_table[i]);
67873 -
67874 + pax_close_kernel();
67875 set_page_address(page, NULL);
67876 need_flush = 1;
67877 }
67878 @@ -186,9 +187,11 @@ start:
67879 }
67880 }
67881 vaddr = PKMAP_ADDR(last_pkmap_nr);
67882 +
67883 + pax_open_kernel();
67884 set_pte_at(&init_mm, vaddr,
67885 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67886 -
67887 + pax_close_kernel();
67888 pkmap_count[last_pkmap_nr] = 1;
67889 set_page_address(page, (void *)vaddr);
67890
67891 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
67892 index 33141f5..e56bef9 100644
67893 --- a/mm/huge_memory.c
67894 +++ b/mm/huge_memory.c
67895 @@ -703,7 +703,7 @@ out:
67896 * run pte_offset_map on the pmd, if an huge pmd could
67897 * materialize from under us from a different thread.
67898 */
67899 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67900 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67901 return VM_FAULT_OOM;
67902 /* if an huge pmd materialized from under us just retry later */
67903 if (unlikely(pmd_trans_huge(*pmd)))
67904 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
67905 index 2316840..b418671 100644
67906 --- a/mm/hugetlb.c
67907 +++ b/mm/hugetlb.c
67908 @@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
67909 return 1;
67910 }
67911
67912 +#ifdef CONFIG_PAX_SEGMEXEC
67913 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67914 +{
67915 + struct mm_struct *mm = vma->vm_mm;
67916 + struct vm_area_struct *vma_m;
67917 + unsigned long address_m;
67918 + pte_t *ptep_m;
67919 +
67920 + vma_m = pax_find_mirror_vma(vma);
67921 + if (!vma_m)
67922 + return;
67923 +
67924 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67925 + address_m = address + SEGMEXEC_TASK_SIZE;
67926 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67927 + get_page(page_m);
67928 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
67929 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67930 +}
67931 +#endif
67932 +
67933 /*
67934 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67935 */
67936 @@ -2450,6 +2471,11 @@ retry_avoidcopy:
67937 make_huge_pte(vma, new_page, 1));
67938 page_remove_rmap(old_page);
67939 hugepage_add_new_anon_rmap(new_page, vma, address);
67940 +
67941 +#ifdef CONFIG_PAX_SEGMEXEC
67942 + pax_mirror_huge_pte(vma, address, new_page);
67943 +#endif
67944 +
67945 /* Make the old page be freed below */
67946 new_page = old_page;
67947 mmu_notifier_invalidate_range_end(mm,
67948 @@ -2601,6 +2627,10 @@ retry:
67949 && (vma->vm_flags & VM_SHARED)));
67950 set_huge_pte_at(mm, address, ptep, new_pte);
67951
67952 +#ifdef CONFIG_PAX_SEGMEXEC
67953 + pax_mirror_huge_pte(vma, address, page);
67954 +#endif
67955 +
67956 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67957 /* Optimization, do the COW without a second fault */
67958 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67959 @@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67960 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67961 struct hstate *h = hstate_vma(vma);
67962
67963 +#ifdef CONFIG_PAX_SEGMEXEC
67964 + struct vm_area_struct *vma_m;
67965 +#endif
67966 +
67967 ptep = huge_pte_offset(mm, address);
67968 if (ptep) {
67969 entry = huge_ptep_get(ptep);
67970 @@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67971 VM_FAULT_SET_HINDEX(h - hstates);
67972 }
67973
67974 +#ifdef CONFIG_PAX_SEGMEXEC
67975 + vma_m = pax_find_mirror_vma(vma);
67976 + if (vma_m) {
67977 + unsigned long address_m;
67978 +
67979 + if (vma->vm_start > vma_m->vm_start) {
67980 + address_m = address;
67981 + address -= SEGMEXEC_TASK_SIZE;
67982 + vma = vma_m;
67983 + h = hstate_vma(vma);
67984 + } else
67985 + address_m = address + SEGMEXEC_TASK_SIZE;
67986 +
67987 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67988 + return VM_FAULT_OOM;
67989 + address_m &= HPAGE_MASK;
67990 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67991 + }
67992 +#endif
67993 +
67994 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67995 if (!ptep)
67996 return VM_FAULT_OOM;
67997 diff --git a/mm/internal.h b/mm/internal.h
67998 index 2189af4..f2ca332 100644
67999 --- a/mm/internal.h
68000 +++ b/mm/internal.h
68001 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
68002 * in mm/page_alloc.c
68003 */
68004 extern void __free_pages_bootmem(struct page *page, unsigned int order);
68005 +extern void free_compound_page(struct page *page);
68006 extern void prep_compound_page(struct page *page, unsigned long order);
68007 #ifdef CONFIG_MEMORY_FAILURE
68008 extern bool is_free_buddy_page(struct page *page);
68009 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
68010 index f3b2a00..61da94d 100644
68011 --- a/mm/kmemleak.c
68012 +++ b/mm/kmemleak.c
68013 @@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
68014
68015 for (i = 0; i < object->trace_len; i++) {
68016 void *ptr = (void *)object->trace[i];
68017 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
68018 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
68019 }
68020 }
68021
68022 diff --git a/mm/maccess.c b/mm/maccess.c
68023 index d53adf9..03a24bf 100644
68024 --- a/mm/maccess.c
68025 +++ b/mm/maccess.c
68026 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
68027 set_fs(KERNEL_DS);
68028 pagefault_disable();
68029 ret = __copy_from_user_inatomic(dst,
68030 - (__force const void __user *)src, size);
68031 + (const void __force_user *)src, size);
68032 pagefault_enable();
68033 set_fs(old_fs);
68034
68035 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
68036
68037 set_fs(KERNEL_DS);
68038 pagefault_disable();
68039 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
68040 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
68041 pagefault_enable();
68042 set_fs(old_fs);
68043
68044 diff --git a/mm/madvise.c b/mm/madvise.c
68045 index 74bf193..feb6fd3 100644
68046 --- a/mm/madvise.c
68047 +++ b/mm/madvise.c
68048 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
68049 pgoff_t pgoff;
68050 unsigned long new_flags = vma->vm_flags;
68051
68052 +#ifdef CONFIG_PAX_SEGMEXEC
68053 + struct vm_area_struct *vma_m;
68054 +#endif
68055 +
68056 switch (behavior) {
68057 case MADV_NORMAL:
68058 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
68059 @@ -110,6 +114,13 @@ success:
68060 /*
68061 * vm_flags is protected by the mmap_sem held in write mode.
68062 */
68063 +
68064 +#ifdef CONFIG_PAX_SEGMEXEC
68065 + vma_m = pax_find_mirror_vma(vma);
68066 + if (vma_m)
68067 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
68068 +#endif
68069 +
68070 vma->vm_flags = new_flags;
68071
68072 out:
68073 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
68074 struct vm_area_struct ** prev,
68075 unsigned long start, unsigned long end)
68076 {
68077 +
68078 +#ifdef CONFIG_PAX_SEGMEXEC
68079 + struct vm_area_struct *vma_m;
68080 +#endif
68081 +
68082 *prev = vma;
68083 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
68084 return -EINVAL;
68085 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
68086 zap_page_range(vma, start, end - start, &details);
68087 } else
68088 zap_page_range(vma, start, end - start, NULL);
68089 +
68090 +#ifdef CONFIG_PAX_SEGMEXEC
68091 + vma_m = pax_find_mirror_vma(vma);
68092 + if (vma_m) {
68093 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
68094 + struct zap_details details = {
68095 + .nonlinear_vma = vma_m,
68096 + .last_index = ULONG_MAX,
68097 + };
68098 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
68099 + } else
68100 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
68101 + }
68102 +#endif
68103 +
68104 return 0;
68105 }
68106
68107 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
68108 if (end < start)
68109 goto out;
68110
68111 +#ifdef CONFIG_PAX_SEGMEXEC
68112 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
68113 + if (end > SEGMEXEC_TASK_SIZE)
68114 + goto out;
68115 + } else
68116 +#endif
68117 +
68118 + if (end > TASK_SIZE)
68119 + goto out;
68120 +
68121 error = 0;
68122 if (end == start)
68123 goto out;
68124 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
68125 index 06d3479..0778eef 100644
68126 --- a/mm/memory-failure.c
68127 +++ b/mm/memory-failure.c
68128 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
68129
68130 int sysctl_memory_failure_recovery __read_mostly = 1;
68131
68132 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68133 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68134
68135 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
68136
68137 @@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
68138 si.si_signo = SIGBUS;
68139 si.si_errno = 0;
68140 si.si_code = BUS_MCEERR_AO;
68141 - si.si_addr = (void *)addr;
68142 + si.si_addr = (void __user *)addr;
68143 #ifdef __ARCH_SI_TRAPNO
68144 si.si_trapno = trapno;
68145 #endif
68146 @@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
68147 }
68148
68149 nr_pages = 1 << compound_trans_order(hpage);
68150 - atomic_long_add(nr_pages, &mce_bad_pages);
68151 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
68152
68153 /*
68154 * We need/can do nothing about count=0 pages.
68155 @@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
68156 if (!PageHWPoison(hpage)
68157 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
68158 || (p != hpage && TestSetPageHWPoison(hpage))) {
68159 - atomic_long_sub(nr_pages, &mce_bad_pages);
68160 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68161 return 0;
68162 }
68163 set_page_hwpoison_huge_page(hpage);
68164 @@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
68165 }
68166 if (hwpoison_filter(p)) {
68167 if (TestClearPageHWPoison(p))
68168 - atomic_long_sub(nr_pages, &mce_bad_pages);
68169 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68170 unlock_page(hpage);
68171 put_page(hpage);
68172 return 0;
68173 @@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
68174 return 0;
68175 }
68176 if (TestClearPageHWPoison(p))
68177 - atomic_long_sub(nr_pages, &mce_bad_pages);
68178 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68179 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
68180 return 0;
68181 }
68182 @@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
68183 */
68184 if (TestClearPageHWPoison(page)) {
68185 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
68186 - atomic_long_sub(nr_pages, &mce_bad_pages);
68187 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68188 freeit = 1;
68189 if (PageHuge(page))
68190 clear_page_hwpoison_huge_page(page);
68191 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
68192 }
68193 done:
68194 if (!PageHWPoison(hpage))
68195 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
68196 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
68197 set_page_hwpoison_huge_page(hpage);
68198 dequeue_hwpoisoned_huge_page(hpage);
68199 /* keep elevated page count for bad page */
68200 @@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
68201 return ret;
68202
68203 done:
68204 - atomic_long_add(1, &mce_bad_pages);
68205 + atomic_long_add_unchecked(1, &mce_bad_pages);
68206 SetPageHWPoison(page);
68207 /* keep elevated page count for bad page */
68208 return ret;
68209 diff --git a/mm/memory.c b/mm/memory.c
68210 index 829d437..3d3926a 100644
68211 --- a/mm/memory.c
68212 +++ b/mm/memory.c
68213 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
68214 return;
68215
68216 pmd = pmd_offset(pud, start);
68217 +
68218 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
68219 pud_clear(pud);
68220 pmd_free_tlb(tlb, pmd, start);
68221 +#endif
68222 +
68223 }
68224
68225 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
68226 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
68227 if (end - 1 > ceiling - 1)
68228 return;
68229
68230 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
68231 pud = pud_offset(pgd, start);
68232 pgd_clear(pgd);
68233 pud_free_tlb(tlb, pud, start);
68234 +#endif
68235 +
68236 }
68237
68238 /*
68239 @@ -1566,12 +1573,6 @@ no_page_table:
68240 return page;
68241 }
68242
68243 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
68244 -{
68245 - return stack_guard_page_start(vma, addr) ||
68246 - stack_guard_page_end(vma, addr+PAGE_SIZE);
68247 -}
68248 -
68249 /**
68250 * __get_user_pages() - pin user pages in memory
68251 * @tsk: task_struct of target task
68252 @@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68253 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
68254 i = 0;
68255
68256 - do {
68257 + while (nr_pages) {
68258 struct vm_area_struct *vma;
68259
68260 - vma = find_extend_vma(mm, start);
68261 + vma = find_vma(mm, start);
68262 if (!vma && in_gate_area(mm, start)) {
68263 unsigned long pg = start & PAGE_MASK;
68264 pgd_t *pgd;
68265 @@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68266 goto next_page;
68267 }
68268
68269 - if (!vma ||
68270 + if (!vma || start < vma->vm_start ||
68271 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
68272 !(vm_flags & vma->vm_flags))
68273 return i ? : -EFAULT;
68274 @@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68275 int ret;
68276 unsigned int fault_flags = 0;
68277
68278 - /* For mlock, just skip the stack guard page. */
68279 - if (foll_flags & FOLL_MLOCK) {
68280 - if (stack_guard_page(vma, start))
68281 - goto next_page;
68282 - }
68283 if (foll_flags & FOLL_WRITE)
68284 fault_flags |= FAULT_FLAG_WRITE;
68285 if (nonblocking)
68286 @@ -1800,7 +1796,7 @@ next_page:
68287 start += PAGE_SIZE;
68288 nr_pages--;
68289 } while (nr_pages && start < vma->vm_end);
68290 - } while (nr_pages);
68291 + }
68292 return i;
68293 }
68294 EXPORT_SYMBOL(__get_user_pages);
68295 @@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
68296 page_add_file_rmap(page);
68297 set_pte_at(mm, addr, pte, mk_pte(page, prot));
68298
68299 +#ifdef CONFIG_PAX_SEGMEXEC
68300 + pax_mirror_file_pte(vma, addr, page, ptl);
68301 +#endif
68302 +
68303 retval = 0;
68304 pte_unmap_unlock(pte, ptl);
68305 return retval;
68306 @@ -2041,10 +2041,22 @@ out:
68307 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
68308 struct page *page)
68309 {
68310 +
68311 +#ifdef CONFIG_PAX_SEGMEXEC
68312 + struct vm_area_struct *vma_m;
68313 +#endif
68314 +
68315 if (addr < vma->vm_start || addr >= vma->vm_end)
68316 return -EFAULT;
68317 if (!page_count(page))
68318 return -EINVAL;
68319 +
68320 +#ifdef CONFIG_PAX_SEGMEXEC
68321 + vma_m = pax_find_mirror_vma(vma);
68322 + if (vma_m)
68323 + vma_m->vm_flags |= VM_INSERTPAGE;
68324 +#endif
68325 +
68326 vma->vm_flags |= VM_INSERTPAGE;
68327 return insert_page(vma, addr, page, vma->vm_page_prot);
68328 }
68329 @@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
68330 unsigned long pfn)
68331 {
68332 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
68333 + BUG_ON(vma->vm_mirror);
68334
68335 if (addr < vma->vm_start || addr >= vma->vm_end)
68336 return -EFAULT;
68337 @@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
68338 copy_user_highpage(dst, src, va, vma);
68339 }
68340
68341 +#ifdef CONFIG_PAX_SEGMEXEC
68342 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
68343 +{
68344 + struct mm_struct *mm = vma->vm_mm;
68345 + spinlock_t *ptl;
68346 + pte_t *pte, entry;
68347 +
68348 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
68349 + entry = *pte;
68350 + if (!pte_present(entry)) {
68351 + if (!pte_none(entry)) {
68352 + BUG_ON(pte_file(entry));
68353 + free_swap_and_cache(pte_to_swp_entry(entry));
68354 + pte_clear_not_present_full(mm, address, pte, 0);
68355 + }
68356 + } else {
68357 + struct page *page;
68358 +
68359 + flush_cache_page(vma, address, pte_pfn(entry));
68360 + entry = ptep_clear_flush(vma, address, pte);
68361 + BUG_ON(pte_dirty(entry));
68362 + page = vm_normal_page(vma, address, entry);
68363 + if (page) {
68364 + update_hiwater_rss(mm);
68365 + if (PageAnon(page))
68366 + dec_mm_counter_fast(mm, MM_ANONPAGES);
68367 + else
68368 + dec_mm_counter_fast(mm, MM_FILEPAGES);
68369 + page_remove_rmap(page);
68370 + page_cache_release(page);
68371 + }
68372 + }
68373 + pte_unmap_unlock(pte, ptl);
68374 +}
68375 +
68376 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
68377 + *
68378 + * the ptl of the lower mapped page is held on entry and is not released on exit
68379 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
68380 + */
68381 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68382 +{
68383 + struct mm_struct *mm = vma->vm_mm;
68384 + unsigned long address_m;
68385 + spinlock_t *ptl_m;
68386 + struct vm_area_struct *vma_m;
68387 + pmd_t *pmd_m;
68388 + pte_t *pte_m, entry_m;
68389 +
68390 + BUG_ON(!page_m || !PageAnon(page_m));
68391 +
68392 + vma_m = pax_find_mirror_vma(vma);
68393 + if (!vma_m)
68394 + return;
68395 +
68396 + BUG_ON(!PageLocked(page_m));
68397 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68398 + address_m = address + SEGMEXEC_TASK_SIZE;
68399 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68400 + pte_m = pte_offset_map(pmd_m, address_m);
68401 + ptl_m = pte_lockptr(mm, pmd_m);
68402 + if (ptl != ptl_m) {
68403 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68404 + if (!pte_none(*pte_m))
68405 + goto out;
68406 + }
68407 +
68408 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68409 + page_cache_get(page_m);
68410 + page_add_anon_rmap(page_m, vma_m, address_m);
68411 + inc_mm_counter_fast(mm, MM_ANONPAGES);
68412 + set_pte_at(mm, address_m, pte_m, entry_m);
68413 + update_mmu_cache(vma_m, address_m, entry_m);
68414 +out:
68415 + if (ptl != ptl_m)
68416 + spin_unlock(ptl_m);
68417 + pte_unmap(pte_m);
68418 + unlock_page(page_m);
68419 +}
68420 +
68421 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68422 +{
68423 + struct mm_struct *mm = vma->vm_mm;
68424 + unsigned long address_m;
68425 + spinlock_t *ptl_m;
68426 + struct vm_area_struct *vma_m;
68427 + pmd_t *pmd_m;
68428 + pte_t *pte_m, entry_m;
68429 +
68430 + BUG_ON(!page_m || PageAnon(page_m));
68431 +
68432 + vma_m = pax_find_mirror_vma(vma);
68433 + if (!vma_m)
68434 + return;
68435 +
68436 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68437 + address_m = address + SEGMEXEC_TASK_SIZE;
68438 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68439 + pte_m = pte_offset_map(pmd_m, address_m);
68440 + ptl_m = pte_lockptr(mm, pmd_m);
68441 + if (ptl != ptl_m) {
68442 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68443 + if (!pte_none(*pte_m))
68444 + goto out;
68445 + }
68446 +
68447 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68448 + page_cache_get(page_m);
68449 + page_add_file_rmap(page_m);
68450 + inc_mm_counter_fast(mm, MM_FILEPAGES);
68451 + set_pte_at(mm, address_m, pte_m, entry_m);
68452 + update_mmu_cache(vma_m, address_m, entry_m);
68453 +out:
68454 + if (ptl != ptl_m)
68455 + spin_unlock(ptl_m);
68456 + pte_unmap(pte_m);
68457 +}
68458 +
68459 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
68460 +{
68461 + struct mm_struct *mm = vma->vm_mm;
68462 + unsigned long address_m;
68463 + spinlock_t *ptl_m;
68464 + struct vm_area_struct *vma_m;
68465 + pmd_t *pmd_m;
68466 + pte_t *pte_m, entry_m;
68467 +
68468 + vma_m = pax_find_mirror_vma(vma);
68469 + if (!vma_m)
68470 + return;
68471 +
68472 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68473 + address_m = address + SEGMEXEC_TASK_SIZE;
68474 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68475 + pte_m = pte_offset_map(pmd_m, address_m);
68476 + ptl_m = pte_lockptr(mm, pmd_m);
68477 + if (ptl != ptl_m) {
68478 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68479 + if (!pte_none(*pte_m))
68480 + goto out;
68481 + }
68482 +
68483 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
68484 + set_pte_at(mm, address_m, pte_m, entry_m);
68485 +out:
68486 + if (ptl != ptl_m)
68487 + spin_unlock(ptl_m);
68488 + pte_unmap(pte_m);
68489 +}
68490 +
68491 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
68492 +{
68493 + struct page *page_m;
68494 + pte_t entry;
68495 +
68496 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
68497 + goto out;
68498 +
68499 + entry = *pte;
68500 + page_m = vm_normal_page(vma, address, entry);
68501 + if (!page_m)
68502 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
68503 + else if (PageAnon(page_m)) {
68504 + if (pax_find_mirror_vma(vma)) {
68505 + pte_unmap_unlock(pte, ptl);
68506 + lock_page(page_m);
68507 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
68508 + if (pte_same(entry, *pte))
68509 + pax_mirror_anon_pte(vma, address, page_m, ptl);
68510 + else
68511 + unlock_page(page_m);
68512 + }
68513 + } else
68514 + pax_mirror_file_pte(vma, address, page_m, ptl);
68515 +
68516 +out:
68517 + pte_unmap_unlock(pte, ptl);
68518 +}
68519 +#endif
68520 +
68521 /*
68522 * This routine handles present pages, when users try to write
68523 * to a shared page. It is done by copying the page to a new address
68524 @@ -2656,6 +2849,12 @@ gotten:
68525 */
68526 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68527 if (likely(pte_same(*page_table, orig_pte))) {
68528 +
68529 +#ifdef CONFIG_PAX_SEGMEXEC
68530 + if (pax_find_mirror_vma(vma))
68531 + BUG_ON(!trylock_page(new_page));
68532 +#endif
68533 +
68534 if (old_page) {
68535 if (!PageAnon(old_page)) {
68536 dec_mm_counter_fast(mm, MM_FILEPAGES);
68537 @@ -2707,6 +2906,10 @@ gotten:
68538 page_remove_rmap(old_page);
68539 }
68540
68541 +#ifdef CONFIG_PAX_SEGMEXEC
68542 + pax_mirror_anon_pte(vma, address, new_page, ptl);
68543 +#endif
68544 +
68545 /* Free the old page.. */
68546 new_page = old_page;
68547 ret |= VM_FAULT_WRITE;
68548 @@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68549 swap_free(entry);
68550 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68551 try_to_free_swap(page);
68552 +
68553 +#ifdef CONFIG_PAX_SEGMEXEC
68554 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68555 +#endif
68556 +
68557 unlock_page(page);
68558 if (swapcache) {
68559 /*
68560 @@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68561
68562 /* No need to invalidate - it was non-present before */
68563 update_mmu_cache(vma, address, page_table);
68564 +
68565 +#ifdef CONFIG_PAX_SEGMEXEC
68566 + pax_mirror_anon_pte(vma, address, page, ptl);
68567 +#endif
68568 +
68569 unlock:
68570 pte_unmap_unlock(page_table, ptl);
68571 out:
68572 @@ -3028,40 +3241,6 @@ out_release:
68573 }
68574
68575 /*
68576 - * This is like a special single-page "expand_{down|up}wards()",
68577 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
68578 - * doesn't hit another vma.
68579 - */
68580 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68581 -{
68582 - address &= PAGE_MASK;
68583 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68584 - struct vm_area_struct *prev = vma->vm_prev;
68585 -
68586 - /*
68587 - * Is there a mapping abutting this one below?
68588 - *
68589 - * That's only ok if it's the same stack mapping
68590 - * that has gotten split..
68591 - */
68592 - if (prev && prev->vm_end == address)
68593 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68594 -
68595 - expand_downwards(vma, address - PAGE_SIZE);
68596 - }
68597 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68598 - struct vm_area_struct *next = vma->vm_next;
68599 -
68600 - /* As VM_GROWSDOWN but s/below/above/ */
68601 - if (next && next->vm_start == address + PAGE_SIZE)
68602 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68603 -
68604 - expand_upwards(vma, address + PAGE_SIZE);
68605 - }
68606 - return 0;
68607 -}
68608 -
68609 -/*
68610 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68611 * but allow concurrent faults), and pte mapped but not yet locked.
68612 * We return with mmap_sem still held, but pte unmapped and unlocked.
68613 @@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68614 unsigned long address, pte_t *page_table, pmd_t *pmd,
68615 unsigned int flags)
68616 {
68617 - struct page *page;
68618 + struct page *page = NULL;
68619 spinlock_t *ptl;
68620 pte_t entry;
68621
68622 - pte_unmap(page_table);
68623 -
68624 - /* Check if we need to add a guard page to the stack */
68625 - if (check_stack_guard_page(vma, address) < 0)
68626 - return VM_FAULT_SIGBUS;
68627 -
68628 - /* Use the zero-page for reads */
68629 if (!(flags & FAULT_FLAG_WRITE)) {
68630 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68631 vma->vm_page_prot));
68632 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68633 + ptl = pte_lockptr(mm, pmd);
68634 + spin_lock(ptl);
68635 if (!pte_none(*page_table))
68636 goto unlock;
68637 goto setpte;
68638 }
68639
68640 /* Allocate our own private page. */
68641 + pte_unmap(page_table);
68642 +
68643 if (unlikely(anon_vma_prepare(vma)))
68644 goto oom;
68645 page = alloc_zeroed_user_highpage_movable(vma, address);
68646 @@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68647 if (!pte_none(*page_table))
68648 goto release;
68649
68650 +#ifdef CONFIG_PAX_SEGMEXEC
68651 + if (pax_find_mirror_vma(vma))
68652 + BUG_ON(!trylock_page(page));
68653 +#endif
68654 +
68655 inc_mm_counter_fast(mm, MM_ANONPAGES);
68656 page_add_new_anon_rmap(page, vma, address);
68657 setpte:
68658 @@ -3116,6 +3296,12 @@ setpte:
68659
68660 /* No need to invalidate - it was non-present before */
68661 update_mmu_cache(vma, address, page_table);
68662 +
68663 +#ifdef CONFIG_PAX_SEGMEXEC
68664 + if (page)
68665 + pax_mirror_anon_pte(vma, address, page, ptl);
68666 +#endif
68667 +
68668 unlock:
68669 pte_unmap_unlock(page_table, ptl);
68670 return 0;
68671 @@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68672 */
68673 /* Only go through if we didn't race with anybody else... */
68674 if (likely(pte_same(*page_table, orig_pte))) {
68675 +
68676 +#ifdef CONFIG_PAX_SEGMEXEC
68677 + if (anon && pax_find_mirror_vma(vma))
68678 + BUG_ON(!trylock_page(page));
68679 +#endif
68680 +
68681 flush_icache_page(vma, page);
68682 entry = mk_pte(page, vma->vm_page_prot);
68683 if (flags & FAULT_FLAG_WRITE)
68684 @@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68685
68686 /* no need to invalidate: a not-present page won't be cached */
68687 update_mmu_cache(vma, address, page_table);
68688 +
68689 +#ifdef CONFIG_PAX_SEGMEXEC
68690 + if (anon)
68691 + pax_mirror_anon_pte(vma, address, page, ptl);
68692 + else
68693 + pax_mirror_file_pte(vma, address, page, ptl);
68694 +#endif
68695 +
68696 } else {
68697 if (cow_page)
68698 mem_cgroup_uncharge_page(cow_page);
68699 @@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
68700 if (flags & FAULT_FLAG_WRITE)
68701 flush_tlb_fix_spurious_fault(vma, address);
68702 }
68703 +
68704 +#ifdef CONFIG_PAX_SEGMEXEC
68705 + pax_mirror_pte(vma, address, pte, pmd, ptl);
68706 + return 0;
68707 +#endif
68708 +
68709 unlock:
68710 pte_unmap_unlock(pte, ptl);
68711 return 0;
68712 @@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68713 pmd_t *pmd;
68714 pte_t *pte;
68715
68716 +#ifdef CONFIG_PAX_SEGMEXEC
68717 + struct vm_area_struct *vma_m;
68718 +#endif
68719 +
68720 __set_current_state(TASK_RUNNING);
68721
68722 count_vm_event(PGFAULT);
68723 @@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68724 if (unlikely(is_vm_hugetlb_page(vma)))
68725 return hugetlb_fault(mm, vma, address, flags);
68726
68727 +#ifdef CONFIG_PAX_SEGMEXEC
68728 + vma_m = pax_find_mirror_vma(vma);
68729 + if (vma_m) {
68730 + unsigned long address_m;
68731 + pgd_t *pgd_m;
68732 + pud_t *pud_m;
68733 + pmd_t *pmd_m;
68734 +
68735 + if (vma->vm_start > vma_m->vm_start) {
68736 + address_m = address;
68737 + address -= SEGMEXEC_TASK_SIZE;
68738 + vma = vma_m;
68739 + } else
68740 + address_m = address + SEGMEXEC_TASK_SIZE;
68741 +
68742 + pgd_m = pgd_offset(mm, address_m);
68743 + pud_m = pud_alloc(mm, pgd_m, address_m);
68744 + if (!pud_m)
68745 + return VM_FAULT_OOM;
68746 + pmd_m = pmd_alloc(mm, pud_m, address_m);
68747 + if (!pmd_m)
68748 + return VM_FAULT_OOM;
68749 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
68750 + return VM_FAULT_OOM;
68751 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68752 + }
68753 +#endif
68754 +
68755 pgd = pgd_offset(mm, address);
68756 pud = pud_alloc(mm, pgd, address);
68757 if (!pud)
68758 @@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68759 * run pte_offset_map on the pmd, if an huge pmd could
68760 * materialize from under us from a different thread.
68761 */
68762 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68763 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68764 return VM_FAULT_OOM;
68765 /* if an huge pmd materialized from under us just retry later */
68766 if (unlikely(pmd_trans_huge(*pmd)))
68767 @@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68768 gate_vma.vm_start = FIXADDR_USER_START;
68769 gate_vma.vm_end = FIXADDR_USER_END;
68770 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68771 - gate_vma.vm_page_prot = __P101;
68772 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68773 /*
68774 * Make sure the vDSO gets into every core dump.
68775 * Dumping its contents makes post-mortem fully interpretable later
68776 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
68777 index c3fdbcb..2e8ef90 100644
68778 --- a/mm/mempolicy.c
68779 +++ b/mm/mempolicy.c
68780 @@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68781 unsigned long vmstart;
68782 unsigned long vmend;
68783
68784 +#ifdef CONFIG_PAX_SEGMEXEC
68785 + struct vm_area_struct *vma_m;
68786 +#endif
68787 +
68788 vma = find_vma_prev(mm, start, &prev);
68789 if (!vma || vma->vm_start > start)
68790 return -EFAULT;
68791 @@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68792 err = policy_vma(vma, new_pol);
68793 if (err)
68794 goto out;
68795 +
68796 +#ifdef CONFIG_PAX_SEGMEXEC
68797 + vma_m = pax_find_mirror_vma(vma);
68798 + if (vma_m) {
68799 + err = policy_vma(vma_m, new_pol);
68800 + if (err)
68801 + goto out;
68802 + }
68803 +#endif
68804 +
68805 }
68806
68807 out:
68808 @@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
68809
68810 if (end < start)
68811 return -EINVAL;
68812 +
68813 +#ifdef CONFIG_PAX_SEGMEXEC
68814 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68815 + if (end > SEGMEXEC_TASK_SIZE)
68816 + return -EINVAL;
68817 + } else
68818 +#endif
68819 +
68820 + if (end > TASK_SIZE)
68821 + return -EINVAL;
68822 +
68823 if (end == start)
68824 return 0;
68825
68826 @@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68827 if (!mm)
68828 goto out;
68829
68830 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68831 + if (mm != current->mm &&
68832 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68833 + err = -EPERM;
68834 + goto out;
68835 + }
68836 +#endif
68837 +
68838 /*
68839 * Check if this process has the right to modify the specified
68840 * process. The right exists if the process has administrative
68841 @@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68842 rcu_read_lock();
68843 tcred = __task_cred(task);
68844 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68845 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68846 - !capable(CAP_SYS_NICE)) {
68847 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68848 rcu_read_unlock();
68849 err = -EPERM;
68850 goto out;
68851 diff --git a/mm/migrate.c b/mm/migrate.c
68852 index 177aca4..ab3a744 100644
68853 --- a/mm/migrate.c
68854 +++ b/mm/migrate.c
68855 @@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68856 if (!mm)
68857 return -EINVAL;
68858
68859 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68860 + if (mm != current->mm &&
68861 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68862 + err = -EPERM;
68863 + goto out;
68864 + }
68865 +#endif
68866 +
68867 /*
68868 * Check if this process has the right to modify the specified
68869 * process. The right exists if the process has administrative
68870 @@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68871 rcu_read_lock();
68872 tcred = __task_cred(task);
68873 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68874 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68875 - !capable(CAP_SYS_NICE)) {
68876 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68877 rcu_read_unlock();
68878 err = -EPERM;
68879 goto out;
68880 diff --git a/mm/mlock.c b/mm/mlock.c
68881 index 4f4f53b..9511904 100644
68882 --- a/mm/mlock.c
68883 +++ b/mm/mlock.c
68884 @@ -13,6 +13,7 @@
68885 #include <linux/pagemap.h>
68886 #include <linux/mempolicy.h>
68887 #include <linux/syscalls.h>
68888 +#include <linux/security.h>
68889 #include <linux/sched.h>
68890 #include <linux/export.h>
68891 #include <linux/rmap.h>
68892 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
68893 return -EINVAL;
68894 if (end == start)
68895 return 0;
68896 + if (end > TASK_SIZE)
68897 + return -EINVAL;
68898 +
68899 vma = find_vma_prev(current->mm, start, &prev);
68900 if (!vma || vma->vm_start > start)
68901 return -ENOMEM;
68902 @@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
68903 for (nstart = start ; ; ) {
68904 vm_flags_t newflags;
68905
68906 +#ifdef CONFIG_PAX_SEGMEXEC
68907 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68908 + break;
68909 +#endif
68910 +
68911 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68912
68913 newflags = vma->vm_flags | VM_LOCKED;
68914 @@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
68915 lock_limit >>= PAGE_SHIFT;
68916
68917 /* check against resource limits */
68918 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68919 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68920 error = do_mlock(start, len, 1);
68921 up_write(&current->mm->mmap_sem);
68922 @@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
68923 static int do_mlockall(int flags)
68924 {
68925 struct vm_area_struct * vma, * prev = NULL;
68926 - unsigned int def_flags = 0;
68927
68928 if (flags & MCL_FUTURE)
68929 - def_flags = VM_LOCKED;
68930 - current->mm->def_flags = def_flags;
68931 + current->mm->def_flags |= VM_LOCKED;
68932 + else
68933 + current->mm->def_flags &= ~VM_LOCKED;
68934 if (flags == MCL_FUTURE)
68935 goto out;
68936
68937 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68938 vm_flags_t newflags;
68939
68940 +#ifdef CONFIG_PAX_SEGMEXEC
68941 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68942 + break;
68943 +#endif
68944 +
68945 + BUG_ON(vma->vm_end > TASK_SIZE);
68946 newflags = vma->vm_flags | VM_LOCKED;
68947 if (!(flags & MCL_CURRENT))
68948 newflags &= ~VM_LOCKED;
68949 @@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68950 lock_limit >>= PAGE_SHIFT;
68951
68952 ret = -ENOMEM;
68953 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68954 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68955 capable(CAP_IPC_LOCK))
68956 ret = do_mlockall(flags);
68957 diff --git a/mm/mmap.c b/mm/mmap.c
68958 index eae90af..44552cf 100644
68959 --- a/mm/mmap.c
68960 +++ b/mm/mmap.c
68961 @@ -46,6 +46,16 @@
68962 #define arch_rebalance_pgtables(addr, len) (addr)
68963 #endif
68964
68965 +static inline void verify_mm_writelocked(struct mm_struct *mm)
68966 +{
68967 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68968 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68969 + up_read(&mm->mmap_sem);
68970 + BUG();
68971 + }
68972 +#endif
68973 +}
68974 +
68975 static void unmap_region(struct mm_struct *mm,
68976 struct vm_area_struct *vma, struct vm_area_struct *prev,
68977 unsigned long start, unsigned long end);
68978 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
68979 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68980 *
68981 */
68982 -pgprot_t protection_map[16] = {
68983 +pgprot_t protection_map[16] __read_only = {
68984 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68985 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68986 };
68987
68988 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
68989 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68990 {
68991 - return __pgprot(pgprot_val(protection_map[vm_flags &
68992 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68993 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68994 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68995 +
68996 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68997 + if (!(__supported_pte_mask & _PAGE_NX) &&
68998 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68999 + (vm_flags & (VM_READ | VM_WRITE)))
69000 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
69001 +#endif
69002 +
69003 + return prot;
69004 }
69005 EXPORT_SYMBOL(vm_get_page_prot);
69006
69007 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
69008 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
69009 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
69010 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
69011 /*
69012 * Make sure vm_committed_as in one cacheline and not cacheline shared with
69013 * other variables. It can be updated by several CPUs frequently.
69014 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
69015 struct vm_area_struct *next = vma->vm_next;
69016
69017 might_sleep();
69018 + BUG_ON(vma->vm_mirror);
69019 if (vma->vm_ops && vma->vm_ops->close)
69020 vma->vm_ops->close(vma);
69021 if (vma->vm_file) {
69022 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
69023 * not page aligned -Ram Gupta
69024 */
69025 rlim = rlimit(RLIMIT_DATA);
69026 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
69027 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
69028 (mm->end_data - mm->start_data) > rlim)
69029 goto out;
69030 @@ -689,6 +711,12 @@ static int
69031 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
69032 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
69033 {
69034 +
69035 +#ifdef CONFIG_PAX_SEGMEXEC
69036 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
69037 + return 0;
69038 +#endif
69039 +
69040 if (is_mergeable_vma(vma, file, vm_flags) &&
69041 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
69042 if (vma->vm_pgoff == vm_pgoff)
69043 @@ -708,6 +736,12 @@ static int
69044 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
69045 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
69046 {
69047 +
69048 +#ifdef CONFIG_PAX_SEGMEXEC
69049 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
69050 + return 0;
69051 +#endif
69052 +
69053 if (is_mergeable_vma(vma, file, vm_flags) &&
69054 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
69055 pgoff_t vm_pglen;
69056 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
69057 struct vm_area_struct *vma_merge(struct mm_struct *mm,
69058 struct vm_area_struct *prev, unsigned long addr,
69059 unsigned long end, unsigned long vm_flags,
69060 - struct anon_vma *anon_vma, struct file *file,
69061 + struct anon_vma *anon_vma, struct file *file,
69062 pgoff_t pgoff, struct mempolicy *policy)
69063 {
69064 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
69065 struct vm_area_struct *area, *next;
69066 int err;
69067
69068 +#ifdef CONFIG_PAX_SEGMEXEC
69069 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
69070 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
69071 +
69072 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
69073 +#endif
69074 +
69075 /*
69076 * We later require that vma->vm_flags == vm_flags,
69077 * so this tests vma->vm_flags & VM_SPECIAL, too.
69078 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
69079 if (next && next->vm_end == end) /* cases 6, 7, 8 */
69080 next = next->vm_next;
69081
69082 +#ifdef CONFIG_PAX_SEGMEXEC
69083 + if (prev)
69084 + prev_m = pax_find_mirror_vma(prev);
69085 + if (area)
69086 + area_m = pax_find_mirror_vma(area);
69087 + if (next)
69088 + next_m = pax_find_mirror_vma(next);
69089 +#endif
69090 +
69091 /*
69092 * Can it merge with the predecessor?
69093 */
69094 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
69095 /* cases 1, 6 */
69096 err = vma_adjust(prev, prev->vm_start,
69097 next->vm_end, prev->vm_pgoff, NULL);
69098 - } else /* cases 2, 5, 7 */
69099 +
69100 +#ifdef CONFIG_PAX_SEGMEXEC
69101 + if (!err && prev_m)
69102 + err = vma_adjust(prev_m, prev_m->vm_start,
69103 + next_m->vm_end, prev_m->vm_pgoff, NULL);
69104 +#endif
69105 +
69106 + } else { /* cases 2, 5, 7 */
69107 err = vma_adjust(prev, prev->vm_start,
69108 end, prev->vm_pgoff, NULL);
69109 +
69110 +#ifdef CONFIG_PAX_SEGMEXEC
69111 + if (!err && prev_m)
69112 + err = vma_adjust(prev_m, prev_m->vm_start,
69113 + end_m, prev_m->vm_pgoff, NULL);
69114 +#endif
69115 +
69116 + }
69117 if (err)
69118 return NULL;
69119 khugepaged_enter_vma_merge(prev);
69120 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
69121 mpol_equal(policy, vma_policy(next)) &&
69122 can_vma_merge_before(next, vm_flags,
69123 anon_vma, file, pgoff+pglen)) {
69124 - if (prev && addr < prev->vm_end) /* case 4 */
69125 + if (prev && addr < prev->vm_end) { /* case 4 */
69126 err = vma_adjust(prev, prev->vm_start,
69127 addr, prev->vm_pgoff, NULL);
69128 - else /* cases 3, 8 */
69129 +
69130 +#ifdef CONFIG_PAX_SEGMEXEC
69131 + if (!err && prev_m)
69132 + err = vma_adjust(prev_m, prev_m->vm_start,
69133 + addr_m, prev_m->vm_pgoff, NULL);
69134 +#endif
69135 +
69136 + } else { /* cases 3, 8 */
69137 err = vma_adjust(area, addr, next->vm_end,
69138 next->vm_pgoff - pglen, NULL);
69139 +
69140 +#ifdef CONFIG_PAX_SEGMEXEC
69141 + if (!err && area_m)
69142 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
69143 + next_m->vm_pgoff - pglen, NULL);
69144 +#endif
69145 +
69146 + }
69147 if (err)
69148 return NULL;
69149 khugepaged_enter_vma_merge(area);
69150 @@ -921,14 +1001,11 @@ none:
69151 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
69152 struct file *file, long pages)
69153 {
69154 - const unsigned long stack_flags
69155 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
69156 -
69157 if (file) {
69158 mm->shared_vm += pages;
69159 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
69160 mm->exec_vm += pages;
69161 - } else if (flags & stack_flags)
69162 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
69163 mm->stack_vm += pages;
69164 if (flags & (VM_RESERVED|VM_IO))
69165 mm->reserved_vm += pages;
69166 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69167 * (the exception is when the underlying filesystem is noexec
69168 * mounted, in which case we dont add PROT_EXEC.)
69169 */
69170 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69171 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69172 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
69173 prot |= PROT_EXEC;
69174
69175 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69176 /* Obtain the address to map to. we verify (or select) it and ensure
69177 * that it represents a valid section of the address space.
69178 */
69179 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
69180 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
69181 if (addr & ~PAGE_MASK)
69182 return addr;
69183
69184 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69185 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
69186 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
69187
69188 +#ifdef CONFIG_PAX_MPROTECT
69189 + if (mm->pax_flags & MF_PAX_MPROTECT) {
69190 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
69191 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
69192 + gr_log_rwxmmap(file);
69193 +
69194 +#ifdef CONFIG_PAX_EMUPLT
69195 + vm_flags &= ~VM_EXEC;
69196 +#else
69197 + return -EPERM;
69198 +#endif
69199 +
69200 + }
69201 +
69202 + if (!(vm_flags & VM_EXEC))
69203 + vm_flags &= ~VM_MAYEXEC;
69204 +#else
69205 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69206 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69207 +#endif
69208 + else
69209 + vm_flags &= ~VM_MAYWRITE;
69210 + }
69211 +#endif
69212 +
69213 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69214 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
69215 + vm_flags &= ~VM_PAGEEXEC;
69216 +#endif
69217 +
69218 if (flags & MAP_LOCKED)
69219 if (!can_do_mlock())
69220 return -EPERM;
69221 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69222 locked += mm->locked_vm;
69223 lock_limit = rlimit(RLIMIT_MEMLOCK);
69224 lock_limit >>= PAGE_SHIFT;
69225 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69226 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
69227 return -EAGAIN;
69228 }
69229 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69230 if (error)
69231 return error;
69232
69233 + if (!gr_acl_handle_mmap(file, prot))
69234 + return -EACCES;
69235 +
69236 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
69237 }
69238 EXPORT_SYMBOL(do_mmap_pgoff);
69239 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
69240 vm_flags_t vm_flags = vma->vm_flags;
69241
69242 /* If it was private or non-writable, the write bit is already clear */
69243 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
69244 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
69245 return 0;
69246
69247 /* The backer wishes to know when pages are first written to? */
69248 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
69249 unsigned long charged = 0;
69250 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
69251
69252 +#ifdef CONFIG_PAX_SEGMEXEC
69253 + struct vm_area_struct *vma_m = NULL;
69254 +#endif
69255 +
69256 + /*
69257 + * mm->mmap_sem is required to protect against another thread
69258 + * changing the mappings in case we sleep.
69259 + */
69260 + verify_mm_writelocked(mm);
69261 +
69262 /* Clear old maps */
69263 error = -ENOMEM;
69264 -munmap_back:
69265 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69266 if (vma && vma->vm_start < addr + len) {
69267 if (do_munmap(mm, addr, len))
69268 return -ENOMEM;
69269 - goto munmap_back;
69270 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69271 + BUG_ON(vma && vma->vm_start < addr + len);
69272 }
69273
69274 /* Check against address space limit. */
69275 @@ -1258,6 +1379,16 @@ munmap_back:
69276 goto unacct_error;
69277 }
69278
69279 +#ifdef CONFIG_PAX_SEGMEXEC
69280 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
69281 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69282 + if (!vma_m) {
69283 + error = -ENOMEM;
69284 + goto free_vma;
69285 + }
69286 + }
69287 +#endif
69288 +
69289 vma->vm_mm = mm;
69290 vma->vm_start = addr;
69291 vma->vm_end = addr + len;
69292 @@ -1281,6 +1412,19 @@ munmap_back:
69293 error = file->f_op->mmap(file, vma);
69294 if (error)
69295 goto unmap_and_free_vma;
69296 +
69297 +#ifdef CONFIG_PAX_SEGMEXEC
69298 + if (vma_m && (vm_flags & VM_EXECUTABLE))
69299 + added_exe_file_vma(mm);
69300 +#endif
69301 +
69302 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69303 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
69304 + vma->vm_flags |= VM_PAGEEXEC;
69305 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69306 + }
69307 +#endif
69308 +
69309 if (vm_flags & VM_EXECUTABLE)
69310 added_exe_file_vma(mm);
69311
69312 @@ -1316,6 +1460,11 @@ munmap_back:
69313 vma_link(mm, vma, prev, rb_link, rb_parent);
69314 file = vma->vm_file;
69315
69316 +#ifdef CONFIG_PAX_SEGMEXEC
69317 + if (vma_m)
69318 + BUG_ON(pax_mirror_vma(vma_m, vma));
69319 +#endif
69320 +
69321 /* Once vma denies write, undo our temporary denial count */
69322 if (correct_wcount)
69323 atomic_inc(&inode->i_writecount);
69324 @@ -1324,6 +1473,7 @@ out:
69325
69326 mm->total_vm += len >> PAGE_SHIFT;
69327 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
69328 + track_exec_limit(mm, addr, addr + len, vm_flags);
69329 if (vm_flags & VM_LOCKED) {
69330 if (!mlock_vma_pages_range(vma, addr, addr + len))
69331 mm->locked_vm += (len >> PAGE_SHIFT);
69332 @@ -1341,6 +1491,12 @@ unmap_and_free_vma:
69333 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
69334 charged = 0;
69335 free_vma:
69336 +
69337 +#ifdef CONFIG_PAX_SEGMEXEC
69338 + if (vma_m)
69339 + kmem_cache_free(vm_area_cachep, vma_m);
69340 +#endif
69341 +
69342 kmem_cache_free(vm_area_cachep, vma);
69343 unacct_error:
69344 if (charged)
69345 @@ -1348,6 +1504,44 @@ unacct_error:
69346 return error;
69347 }
69348
69349 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
69350 +{
69351 + if (!vma) {
69352 +#ifdef CONFIG_STACK_GROWSUP
69353 + if (addr > sysctl_heap_stack_gap)
69354 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
69355 + else
69356 + vma = find_vma(current->mm, 0);
69357 + if (vma && (vma->vm_flags & VM_GROWSUP))
69358 + return false;
69359 +#endif
69360 + return true;
69361 + }
69362 +
69363 + if (addr + len > vma->vm_start)
69364 + return false;
69365 +
69366 + if (vma->vm_flags & VM_GROWSDOWN)
69367 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
69368 +#ifdef CONFIG_STACK_GROWSUP
69369 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
69370 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
69371 +#endif
69372 +
69373 + return true;
69374 +}
69375 +
69376 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
69377 +{
69378 + if (vma->vm_start < len)
69379 + return -ENOMEM;
69380 + if (!(vma->vm_flags & VM_GROWSDOWN))
69381 + return vma->vm_start - len;
69382 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
69383 + return vma->vm_start - len - sysctl_heap_stack_gap;
69384 + return -ENOMEM;
69385 +}
69386 +
69387 /* Get an address range which is currently unmapped.
69388 * For shmat() with addr=0.
69389 *
69390 @@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
69391 if (flags & MAP_FIXED)
69392 return addr;
69393
69394 +#ifdef CONFIG_PAX_RANDMMAP
69395 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69396 +#endif
69397 +
69398 if (addr) {
69399 addr = PAGE_ALIGN(addr);
69400 - vma = find_vma(mm, addr);
69401 - if (TASK_SIZE - len >= addr &&
69402 - (!vma || addr + len <= vma->vm_start))
69403 - return addr;
69404 + if (TASK_SIZE - len >= addr) {
69405 + vma = find_vma(mm, addr);
69406 + if (check_heap_stack_gap(vma, addr, len))
69407 + return addr;
69408 + }
69409 }
69410 if (len > mm->cached_hole_size) {
69411 - start_addr = addr = mm->free_area_cache;
69412 + start_addr = addr = mm->free_area_cache;
69413 } else {
69414 - start_addr = addr = TASK_UNMAPPED_BASE;
69415 - mm->cached_hole_size = 0;
69416 + start_addr = addr = mm->mmap_base;
69417 + mm->cached_hole_size = 0;
69418 }
69419
69420 full_search:
69421 @@ -1396,34 +1595,40 @@ full_search:
69422 * Start a new search - just in case we missed
69423 * some holes.
69424 */
69425 - if (start_addr != TASK_UNMAPPED_BASE) {
69426 - addr = TASK_UNMAPPED_BASE;
69427 - start_addr = addr;
69428 + if (start_addr != mm->mmap_base) {
69429 + start_addr = addr = mm->mmap_base;
69430 mm->cached_hole_size = 0;
69431 goto full_search;
69432 }
69433 return -ENOMEM;
69434 }
69435 - if (!vma || addr + len <= vma->vm_start) {
69436 - /*
69437 - * Remember the place where we stopped the search:
69438 - */
69439 - mm->free_area_cache = addr + len;
69440 - return addr;
69441 - }
69442 + if (check_heap_stack_gap(vma, addr, len))
69443 + break;
69444 if (addr + mm->cached_hole_size < vma->vm_start)
69445 mm->cached_hole_size = vma->vm_start - addr;
69446 addr = vma->vm_end;
69447 }
69448 +
69449 + /*
69450 + * Remember the place where we stopped the search:
69451 + */
69452 + mm->free_area_cache = addr + len;
69453 + return addr;
69454 }
69455 #endif
69456
69457 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69458 {
69459 +
69460 +#ifdef CONFIG_PAX_SEGMEXEC
69461 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69462 + return;
69463 +#endif
69464 +
69465 /*
69466 * Is this a new hole at the lowest possible address?
69467 */
69468 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69469 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69470 mm->free_area_cache = addr;
69471 mm->cached_hole_size = ~0UL;
69472 }
69473 @@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69474 {
69475 struct vm_area_struct *vma;
69476 struct mm_struct *mm = current->mm;
69477 - unsigned long addr = addr0;
69478 + unsigned long base = mm->mmap_base, addr = addr0;
69479
69480 /* requested length too big for entire address space */
69481 if (len > TASK_SIZE)
69482 @@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69483 if (flags & MAP_FIXED)
69484 return addr;
69485
69486 +#ifdef CONFIG_PAX_RANDMMAP
69487 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69488 +#endif
69489 +
69490 /* requesting a specific address */
69491 if (addr) {
69492 addr = PAGE_ALIGN(addr);
69493 - vma = find_vma(mm, addr);
69494 - if (TASK_SIZE - len >= addr &&
69495 - (!vma || addr + len <= vma->vm_start))
69496 - return addr;
69497 + if (TASK_SIZE - len >= addr) {
69498 + vma = find_vma(mm, addr);
69499 + if (check_heap_stack_gap(vma, addr, len))
69500 + return addr;
69501 + }
69502 }
69503
69504 /* check if free_area_cache is useful for us */
69505 @@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69506 /* make sure it can fit in the remaining address space */
69507 if (addr > len) {
69508 vma = find_vma(mm, addr-len);
69509 - if (!vma || addr <= vma->vm_start)
69510 + if (check_heap_stack_gap(vma, addr - len, len))
69511 /* remember the address as a hint for next time */
69512 return (mm->free_area_cache = addr-len);
69513 }
69514 @@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69515 * return with success:
69516 */
69517 vma = find_vma(mm, addr);
69518 - if (!vma || addr+len <= vma->vm_start)
69519 + if (check_heap_stack_gap(vma, addr, len))
69520 /* remember the address as a hint for next time */
69521 return (mm->free_area_cache = addr);
69522
69523 @@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69524 mm->cached_hole_size = vma->vm_start - addr;
69525
69526 /* try just below the current vma->vm_start */
69527 - addr = vma->vm_start-len;
69528 - } while (len < vma->vm_start);
69529 + addr = skip_heap_stack_gap(vma, len);
69530 + } while (!IS_ERR_VALUE(addr));
69531
69532 bottomup:
69533 /*
69534 @@ -1507,13 +1717,21 @@ bottomup:
69535 * can happen with large stack limits and large mmap()
69536 * allocations.
69537 */
69538 + mm->mmap_base = TASK_UNMAPPED_BASE;
69539 +
69540 +#ifdef CONFIG_PAX_RANDMMAP
69541 + if (mm->pax_flags & MF_PAX_RANDMMAP)
69542 + mm->mmap_base += mm->delta_mmap;
69543 +#endif
69544 +
69545 + mm->free_area_cache = mm->mmap_base;
69546 mm->cached_hole_size = ~0UL;
69547 - mm->free_area_cache = TASK_UNMAPPED_BASE;
69548 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69549 /*
69550 * Restore the topdown base:
69551 */
69552 - mm->free_area_cache = mm->mmap_base;
69553 + mm->mmap_base = base;
69554 + mm->free_area_cache = base;
69555 mm->cached_hole_size = ~0UL;
69556
69557 return addr;
69558 @@ -1522,6 +1740,12 @@ bottomup:
69559
69560 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69561 {
69562 +
69563 +#ifdef CONFIG_PAX_SEGMEXEC
69564 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69565 + return;
69566 +#endif
69567 +
69568 /*
69569 * Is this a new hole at the highest possible address?
69570 */
69571 @@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69572 mm->free_area_cache = addr;
69573
69574 /* dont allow allocations above current base */
69575 - if (mm->free_area_cache > mm->mmap_base)
69576 + if (mm->free_area_cache > mm->mmap_base) {
69577 mm->free_area_cache = mm->mmap_base;
69578 + mm->cached_hole_size = ~0UL;
69579 + }
69580 }
69581
69582 unsigned long
69583 @@ -1603,40 +1829,42 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
69584
69585 EXPORT_SYMBOL(find_vma);
69586
69587 -/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
69588 +/*
69589 + * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
69590 + * Note: pprev is set to NULL when return value is NULL.
69591 + */
69592 struct vm_area_struct *
69593 find_vma_prev(struct mm_struct *mm, unsigned long addr,
69594 struct vm_area_struct **pprev)
69595 {
69596 - struct vm_area_struct *vma = NULL, *prev = NULL;
69597 - struct rb_node *rb_node;
69598 - if (!mm)
69599 - goto out;
69600 + struct vm_area_struct *vma;
69601
69602 - /* Guard against addr being lower than the first VMA */
69603 - vma = mm->mmap;
69604 + vma = find_vma(mm, addr);
69605 + *pprev = vma ? vma->vm_prev : NULL;
69606 + return vma;
69607 +}
69608
69609 - /* Go through the RB tree quickly. */
69610 - rb_node = mm->mm_rb.rb_node;
69611 +#ifdef CONFIG_PAX_SEGMEXEC
69612 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69613 +{
69614 + struct vm_area_struct *vma_m;
69615
69616 - while (rb_node) {
69617 - struct vm_area_struct *vma_tmp;
69618 - vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
69619 -
69620 - if (addr < vma_tmp->vm_end) {
69621 - rb_node = rb_node->rb_left;
69622 - } else {
69623 - prev = vma_tmp;
69624 - if (!prev->vm_next || (addr < prev->vm_next->vm_end))
69625 - break;
69626 - rb_node = rb_node->rb_right;
69627 - }
69628 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69629 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69630 + BUG_ON(vma->vm_mirror);
69631 + return NULL;
69632 }
69633 -
69634 -out:
69635 - *pprev = prev;
69636 - return prev ? prev->vm_next : vma;
69637 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69638 + vma_m = vma->vm_mirror;
69639 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69640 + BUG_ON(vma->vm_file != vma_m->vm_file);
69641 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69642 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69643 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69644 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69645 + return vma_m;
69646 }
69647 +#endif
69648
69649 /*
69650 * Verify that the stack growth is acceptable and
69651 @@ -1654,6 +1882,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69652 return -ENOMEM;
69653
69654 /* Stack limit test */
69655 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
69656 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69657 return -ENOMEM;
69658
69659 @@ -1664,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69660 locked = mm->locked_vm + grow;
69661 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69662 limit >>= PAGE_SHIFT;
69663 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69664 if (locked > limit && !capable(CAP_IPC_LOCK))
69665 return -ENOMEM;
69666 }
69667 @@ -1694,37 +1924,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69668 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69669 * vma is the last one with address > vma->vm_end. Have to extend vma.
69670 */
69671 +#ifndef CONFIG_IA64
69672 +static
69673 +#endif
69674 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69675 {
69676 int error;
69677 + bool locknext;
69678
69679 if (!(vma->vm_flags & VM_GROWSUP))
69680 return -EFAULT;
69681
69682 + /* Also guard against wrapping around to address 0. */
69683 + if (address < PAGE_ALIGN(address+1))
69684 + address = PAGE_ALIGN(address+1);
69685 + else
69686 + return -ENOMEM;
69687 +
69688 /*
69689 * We must make sure the anon_vma is allocated
69690 * so that the anon_vma locking is not a noop.
69691 */
69692 if (unlikely(anon_vma_prepare(vma)))
69693 return -ENOMEM;
69694 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69695 + if (locknext && anon_vma_prepare(vma->vm_next))
69696 + return -ENOMEM;
69697 vma_lock_anon_vma(vma);
69698 + if (locknext)
69699 + vma_lock_anon_vma(vma->vm_next);
69700
69701 /*
69702 * vma->vm_start/vm_end cannot change under us because the caller
69703 * is required to hold the mmap_sem in read mode. We need the
69704 - * anon_vma lock to serialize against concurrent expand_stacks.
69705 - * Also guard against wrapping around to address 0.
69706 + * anon_vma locks to serialize against concurrent expand_stacks
69707 + * and expand_upwards.
69708 */
69709 - if (address < PAGE_ALIGN(address+4))
69710 - address = PAGE_ALIGN(address+4);
69711 - else {
69712 - vma_unlock_anon_vma(vma);
69713 - return -ENOMEM;
69714 - }
69715 error = 0;
69716
69717 /* Somebody else might have raced and expanded it already */
69718 - if (address > vma->vm_end) {
69719 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69720 + error = -ENOMEM;
69721 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69722 unsigned long size, grow;
69723
69724 size = address - vma->vm_start;
69725 @@ -1739,6 +1980,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69726 }
69727 }
69728 }
69729 + if (locknext)
69730 + vma_unlock_anon_vma(vma->vm_next);
69731 vma_unlock_anon_vma(vma);
69732 khugepaged_enter_vma_merge(vma);
69733 return error;
69734 @@ -1752,6 +1995,8 @@ int expand_downwards(struct vm_area_struct *vma,
69735 unsigned long address)
69736 {
69737 int error;
69738 + bool lockprev = false;
69739 + struct vm_area_struct *prev;
69740
69741 /*
69742 * We must make sure the anon_vma is allocated
69743 @@ -1765,6 +2010,15 @@ int expand_downwards(struct vm_area_struct *vma,
69744 if (error)
69745 return error;
69746
69747 + prev = vma->vm_prev;
69748 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69749 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69750 +#endif
69751 + if (lockprev && anon_vma_prepare(prev))
69752 + return -ENOMEM;
69753 + if (lockprev)
69754 + vma_lock_anon_vma(prev);
69755 +
69756 vma_lock_anon_vma(vma);
69757
69758 /*
69759 @@ -1774,9 +2028,17 @@ int expand_downwards(struct vm_area_struct *vma,
69760 */
69761
69762 /* Somebody else might have raced and expanded it already */
69763 - if (address < vma->vm_start) {
69764 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69765 + error = -ENOMEM;
69766 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69767 unsigned long size, grow;
69768
69769 +#ifdef CONFIG_PAX_SEGMEXEC
69770 + struct vm_area_struct *vma_m;
69771 +
69772 + vma_m = pax_find_mirror_vma(vma);
69773 +#endif
69774 +
69775 size = vma->vm_end - address;
69776 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69777
69778 @@ -1786,11 +2048,22 @@ int expand_downwards(struct vm_area_struct *vma,
69779 if (!error) {
69780 vma->vm_start = address;
69781 vma->vm_pgoff -= grow;
69782 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69783 +
69784 +#ifdef CONFIG_PAX_SEGMEXEC
69785 + if (vma_m) {
69786 + vma_m->vm_start -= grow << PAGE_SHIFT;
69787 + vma_m->vm_pgoff -= grow;
69788 + }
69789 +#endif
69790 +
69791 perf_event_mmap(vma);
69792 }
69793 }
69794 }
69795 vma_unlock_anon_vma(vma);
69796 + if (lockprev)
69797 + vma_unlock_anon_vma(prev);
69798 khugepaged_enter_vma_merge(vma);
69799 return error;
69800 }
69801 @@ -1860,6 +2133,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
69802 do {
69803 long nrpages = vma_pages(vma);
69804
69805 +#ifdef CONFIG_PAX_SEGMEXEC
69806 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69807 + vma = remove_vma(vma);
69808 + continue;
69809 + }
69810 +#endif
69811 +
69812 mm->total_vm -= nrpages;
69813 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69814 vma = remove_vma(vma);
69815 @@ -1905,6 +2185,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
69816 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69817 vma->vm_prev = NULL;
69818 do {
69819 +
69820 +#ifdef CONFIG_PAX_SEGMEXEC
69821 + if (vma->vm_mirror) {
69822 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69823 + vma->vm_mirror->vm_mirror = NULL;
69824 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
69825 + vma->vm_mirror = NULL;
69826 + }
69827 +#endif
69828 +
69829 rb_erase(&vma->vm_rb, &mm->mm_rb);
69830 mm->map_count--;
69831 tail_vma = vma;
69832 @@ -1933,14 +2223,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69833 struct vm_area_struct *new;
69834 int err = -ENOMEM;
69835
69836 +#ifdef CONFIG_PAX_SEGMEXEC
69837 + struct vm_area_struct *vma_m, *new_m = NULL;
69838 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69839 +#endif
69840 +
69841 if (is_vm_hugetlb_page(vma) && (addr &
69842 ~(huge_page_mask(hstate_vma(vma)))))
69843 return -EINVAL;
69844
69845 +#ifdef CONFIG_PAX_SEGMEXEC
69846 + vma_m = pax_find_mirror_vma(vma);
69847 +#endif
69848 +
69849 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69850 if (!new)
69851 goto out_err;
69852
69853 +#ifdef CONFIG_PAX_SEGMEXEC
69854 + if (vma_m) {
69855 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69856 + if (!new_m) {
69857 + kmem_cache_free(vm_area_cachep, new);
69858 + goto out_err;
69859 + }
69860 + }
69861 +#endif
69862 +
69863 /* most fields are the same, copy all, and then fixup */
69864 *new = *vma;
69865
69866 @@ -1953,6 +2262,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69867 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69868 }
69869
69870 +#ifdef CONFIG_PAX_SEGMEXEC
69871 + if (vma_m) {
69872 + *new_m = *vma_m;
69873 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
69874 + new_m->vm_mirror = new;
69875 + new->vm_mirror = new_m;
69876 +
69877 + if (new_below)
69878 + new_m->vm_end = addr_m;
69879 + else {
69880 + new_m->vm_start = addr_m;
69881 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69882 + }
69883 + }
69884 +#endif
69885 +
69886 pol = mpol_dup(vma_policy(vma));
69887 if (IS_ERR(pol)) {
69888 err = PTR_ERR(pol);
69889 @@ -1978,6 +2303,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69890 else
69891 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69892
69893 +#ifdef CONFIG_PAX_SEGMEXEC
69894 + if (!err && vma_m) {
69895 + if (anon_vma_clone(new_m, vma_m))
69896 + goto out_free_mpol;
69897 +
69898 + mpol_get(pol);
69899 + vma_set_policy(new_m, pol);
69900 +
69901 + if (new_m->vm_file) {
69902 + get_file(new_m->vm_file);
69903 + if (vma_m->vm_flags & VM_EXECUTABLE)
69904 + added_exe_file_vma(mm);
69905 + }
69906 +
69907 + if (new_m->vm_ops && new_m->vm_ops->open)
69908 + new_m->vm_ops->open(new_m);
69909 +
69910 + if (new_below)
69911 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69912 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69913 + else
69914 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69915 +
69916 + if (err) {
69917 + if (new_m->vm_ops && new_m->vm_ops->close)
69918 + new_m->vm_ops->close(new_m);
69919 + if (new_m->vm_file) {
69920 + if (vma_m->vm_flags & VM_EXECUTABLE)
69921 + removed_exe_file_vma(mm);
69922 + fput(new_m->vm_file);
69923 + }
69924 + mpol_put(pol);
69925 + }
69926 + }
69927 +#endif
69928 +
69929 /* Success. */
69930 if (!err)
69931 return 0;
69932 @@ -1990,10 +2351,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69933 removed_exe_file_vma(mm);
69934 fput(new->vm_file);
69935 }
69936 - unlink_anon_vmas(new);
69937 out_free_mpol:
69938 mpol_put(pol);
69939 out_free_vma:
69940 +
69941 +#ifdef CONFIG_PAX_SEGMEXEC
69942 + if (new_m) {
69943 + unlink_anon_vmas(new_m);
69944 + kmem_cache_free(vm_area_cachep, new_m);
69945 + }
69946 +#endif
69947 +
69948 + unlink_anon_vmas(new);
69949 kmem_cache_free(vm_area_cachep, new);
69950 out_err:
69951 return err;
69952 @@ -2006,6 +2375,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69953 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69954 unsigned long addr, int new_below)
69955 {
69956 +
69957 +#ifdef CONFIG_PAX_SEGMEXEC
69958 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69959 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69960 + if (mm->map_count >= sysctl_max_map_count-1)
69961 + return -ENOMEM;
69962 + } else
69963 +#endif
69964 +
69965 if (mm->map_count >= sysctl_max_map_count)
69966 return -ENOMEM;
69967
69968 @@ -2017,11 +2395,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69969 * work. This now handles partial unmappings.
69970 * Jeremy Fitzhardinge <jeremy@goop.org>
69971 */
69972 +#ifdef CONFIG_PAX_SEGMEXEC
69973 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69974 {
69975 + int ret = __do_munmap(mm, start, len);
69976 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69977 + return ret;
69978 +
69979 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69980 +}
69981 +
69982 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69983 +#else
69984 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69985 +#endif
69986 +{
69987 unsigned long end;
69988 struct vm_area_struct *vma, *prev, *last;
69989
69990 + /*
69991 + * mm->mmap_sem is required to protect against another thread
69992 + * changing the mappings in case we sleep.
69993 + */
69994 + verify_mm_writelocked(mm);
69995 +
69996 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69997 return -EINVAL;
69998
69999 @@ -2096,6 +2493,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
70000 /* Fix up all other VM information */
70001 remove_vma_list(mm, vma);
70002
70003 + track_exec_limit(mm, start, end, 0UL);
70004 +
70005 return 0;
70006 }
70007
70008 @@ -2108,22 +2507,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
70009
70010 profile_munmap(addr);
70011
70012 +#ifdef CONFIG_PAX_SEGMEXEC
70013 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
70014 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
70015 + return -EINVAL;
70016 +#endif
70017 +
70018 down_write(&mm->mmap_sem);
70019 ret = do_munmap(mm, addr, len);
70020 up_write(&mm->mmap_sem);
70021 return ret;
70022 }
70023
70024 -static inline void verify_mm_writelocked(struct mm_struct *mm)
70025 -{
70026 -#ifdef CONFIG_DEBUG_VM
70027 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70028 - WARN_ON(1);
70029 - up_read(&mm->mmap_sem);
70030 - }
70031 -#endif
70032 -}
70033 -
70034 /*
70035 * this is really a simplified "do_mmap". it only handles
70036 * anonymous maps. eventually we may be able to do some
70037 @@ -2137,6 +2532,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70038 struct rb_node ** rb_link, * rb_parent;
70039 pgoff_t pgoff = addr >> PAGE_SHIFT;
70040 int error;
70041 + unsigned long charged;
70042
70043 len = PAGE_ALIGN(len);
70044 if (!len)
70045 @@ -2148,16 +2544,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70046
70047 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
70048
70049 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
70050 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
70051 + flags &= ~VM_EXEC;
70052 +
70053 +#ifdef CONFIG_PAX_MPROTECT
70054 + if (mm->pax_flags & MF_PAX_MPROTECT)
70055 + flags &= ~VM_MAYEXEC;
70056 +#endif
70057 +
70058 + }
70059 +#endif
70060 +
70061 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
70062 if (error & ~PAGE_MASK)
70063 return error;
70064
70065 + charged = len >> PAGE_SHIFT;
70066 +
70067 /*
70068 * mlock MCL_FUTURE?
70069 */
70070 if (mm->def_flags & VM_LOCKED) {
70071 unsigned long locked, lock_limit;
70072 - locked = len >> PAGE_SHIFT;
70073 + locked = charged;
70074 locked += mm->locked_vm;
70075 lock_limit = rlimit(RLIMIT_MEMLOCK);
70076 lock_limit >>= PAGE_SHIFT;
70077 @@ -2174,22 +2584,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70078 /*
70079 * Clear old maps. this also does some error checking for us
70080 */
70081 - munmap_back:
70082 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70083 if (vma && vma->vm_start < addr + len) {
70084 if (do_munmap(mm, addr, len))
70085 return -ENOMEM;
70086 - goto munmap_back;
70087 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70088 + BUG_ON(vma && vma->vm_start < addr + len);
70089 }
70090
70091 /* Check against address space limits *after* clearing old maps... */
70092 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
70093 + if (!may_expand_vm(mm, charged))
70094 return -ENOMEM;
70095
70096 if (mm->map_count > sysctl_max_map_count)
70097 return -ENOMEM;
70098
70099 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
70100 + if (security_vm_enough_memory(charged))
70101 return -ENOMEM;
70102
70103 /* Can we just expand an old private anonymous mapping? */
70104 @@ -2203,7 +2613,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70105 */
70106 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70107 if (!vma) {
70108 - vm_unacct_memory(len >> PAGE_SHIFT);
70109 + vm_unacct_memory(charged);
70110 return -ENOMEM;
70111 }
70112
70113 @@ -2217,11 +2627,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70114 vma_link(mm, vma, prev, rb_link, rb_parent);
70115 out:
70116 perf_event_mmap(vma);
70117 - mm->total_vm += len >> PAGE_SHIFT;
70118 + mm->total_vm += charged;
70119 if (flags & VM_LOCKED) {
70120 if (!mlock_vma_pages_range(vma, addr, addr + len))
70121 - mm->locked_vm += (len >> PAGE_SHIFT);
70122 + mm->locked_vm += charged;
70123 }
70124 + track_exec_limit(mm, addr, addr + len, flags);
70125 return addr;
70126 }
70127
70128 @@ -2268,8 +2679,10 @@ void exit_mmap(struct mm_struct *mm)
70129 * Walk the list again, actually closing and freeing it,
70130 * with preemption enabled, without holding any MM locks.
70131 */
70132 - while (vma)
70133 + while (vma) {
70134 + vma->vm_mirror = NULL;
70135 vma = remove_vma(vma);
70136 + }
70137
70138 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
70139 }
70140 @@ -2283,6 +2696,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
70141 struct vm_area_struct * __vma, * prev;
70142 struct rb_node ** rb_link, * rb_parent;
70143
70144 +#ifdef CONFIG_PAX_SEGMEXEC
70145 + struct vm_area_struct *vma_m = NULL;
70146 +#endif
70147 +
70148 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
70149 + return -EPERM;
70150 +
70151 /*
70152 * The vm_pgoff of a purely anonymous vma should be irrelevant
70153 * until its first write fault, when page's anon_vma and index
70154 @@ -2305,7 +2725,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
70155 if ((vma->vm_flags & VM_ACCOUNT) &&
70156 security_vm_enough_memory_mm(mm, vma_pages(vma)))
70157 return -ENOMEM;
70158 +
70159 +#ifdef CONFIG_PAX_SEGMEXEC
70160 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
70161 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70162 + if (!vma_m)
70163 + return -ENOMEM;
70164 + }
70165 +#endif
70166 +
70167 vma_link(mm, vma, prev, rb_link, rb_parent);
70168 +
70169 +#ifdef CONFIG_PAX_SEGMEXEC
70170 + if (vma_m)
70171 + BUG_ON(pax_mirror_vma(vma_m, vma));
70172 +#endif
70173 +
70174 return 0;
70175 }
70176
70177 @@ -2323,6 +2758,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
70178 struct rb_node **rb_link, *rb_parent;
70179 struct mempolicy *pol;
70180
70181 + BUG_ON(vma->vm_mirror);
70182 +
70183 /*
70184 * If anonymous vma has not yet been faulted, update new pgoff
70185 * to match new location, to increase its chance of merging.
70186 @@ -2373,6 +2810,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
70187 return NULL;
70188 }
70189
70190 +#ifdef CONFIG_PAX_SEGMEXEC
70191 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
70192 +{
70193 + struct vm_area_struct *prev_m;
70194 + struct rb_node **rb_link_m, *rb_parent_m;
70195 + struct mempolicy *pol_m;
70196 +
70197 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
70198 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
70199 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
70200 + *vma_m = *vma;
70201 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
70202 + if (anon_vma_clone(vma_m, vma))
70203 + return -ENOMEM;
70204 + pol_m = vma_policy(vma_m);
70205 + mpol_get(pol_m);
70206 + vma_set_policy(vma_m, pol_m);
70207 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
70208 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
70209 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
70210 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
70211 + if (vma_m->vm_file)
70212 + get_file(vma_m->vm_file);
70213 + if (vma_m->vm_ops && vma_m->vm_ops->open)
70214 + vma_m->vm_ops->open(vma_m);
70215 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
70216 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
70217 + vma_m->vm_mirror = vma;
70218 + vma->vm_mirror = vma_m;
70219 + return 0;
70220 +}
70221 +#endif
70222 +
70223 /*
70224 * Return true if the calling process may expand its vm space by the passed
70225 * number of pages
70226 @@ -2383,7 +2853,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
70227 unsigned long lim;
70228
70229 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
70230 -
70231 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
70232 if (cur + npages > lim)
70233 return 0;
70234 return 1;
70235 @@ -2454,6 +2924,22 @@ int install_special_mapping(struct mm_struct *mm,
70236 vma->vm_start = addr;
70237 vma->vm_end = addr + len;
70238
70239 +#ifdef CONFIG_PAX_MPROTECT
70240 + if (mm->pax_flags & MF_PAX_MPROTECT) {
70241 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
70242 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
70243 + return -EPERM;
70244 + if (!(vm_flags & VM_EXEC))
70245 + vm_flags &= ~VM_MAYEXEC;
70246 +#else
70247 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70248 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70249 +#endif
70250 + else
70251 + vm_flags &= ~VM_MAYWRITE;
70252 + }
70253 +#endif
70254 +
70255 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
70256 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
70257
70258 diff --git a/mm/mprotect.c b/mm/mprotect.c
70259 index 5a688a2..27e031c 100644
70260 --- a/mm/mprotect.c
70261 +++ b/mm/mprotect.c
70262 @@ -23,10 +23,16 @@
70263 #include <linux/mmu_notifier.h>
70264 #include <linux/migrate.h>
70265 #include <linux/perf_event.h>
70266 +
70267 +#ifdef CONFIG_PAX_MPROTECT
70268 +#include <linux/elf.h>
70269 +#endif
70270 +
70271 #include <asm/uaccess.h>
70272 #include <asm/pgtable.h>
70273 #include <asm/cacheflush.h>
70274 #include <asm/tlbflush.h>
70275 +#include <asm/mmu_context.h>
70276
70277 #ifndef pgprot_modify
70278 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
70279 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
70280 flush_tlb_range(vma, start, end);
70281 }
70282
70283 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70284 +/* called while holding the mmap semaphor for writing except stack expansion */
70285 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
70286 +{
70287 + unsigned long oldlimit, newlimit = 0UL;
70288 +
70289 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
70290 + return;
70291 +
70292 + spin_lock(&mm->page_table_lock);
70293 + oldlimit = mm->context.user_cs_limit;
70294 + if ((prot & VM_EXEC) && oldlimit < end)
70295 + /* USER_CS limit moved up */
70296 + newlimit = end;
70297 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
70298 + /* USER_CS limit moved down */
70299 + newlimit = start;
70300 +
70301 + if (newlimit) {
70302 + mm->context.user_cs_limit = newlimit;
70303 +
70304 +#ifdef CONFIG_SMP
70305 + wmb();
70306 + cpus_clear(mm->context.cpu_user_cs_mask);
70307 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
70308 +#endif
70309 +
70310 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
70311 + }
70312 + spin_unlock(&mm->page_table_lock);
70313 + if (newlimit == end) {
70314 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
70315 +
70316 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
70317 + if (is_vm_hugetlb_page(vma))
70318 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
70319 + else
70320 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
70321 + }
70322 +}
70323 +#endif
70324 +
70325 int
70326 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70327 unsigned long start, unsigned long end, unsigned long newflags)
70328 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70329 int error;
70330 int dirty_accountable = 0;
70331
70332 +#ifdef CONFIG_PAX_SEGMEXEC
70333 + struct vm_area_struct *vma_m = NULL;
70334 + unsigned long start_m, end_m;
70335 +
70336 + start_m = start + SEGMEXEC_TASK_SIZE;
70337 + end_m = end + SEGMEXEC_TASK_SIZE;
70338 +#endif
70339 +
70340 if (newflags == oldflags) {
70341 *pprev = vma;
70342 return 0;
70343 }
70344
70345 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
70346 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
70347 +
70348 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
70349 + return -ENOMEM;
70350 +
70351 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
70352 + return -ENOMEM;
70353 + }
70354 +
70355 /*
70356 * If we make a private mapping writable we increase our commit;
70357 * but (without finer accounting) cannot reduce our commit if we
70358 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70359 }
70360 }
70361
70362 +#ifdef CONFIG_PAX_SEGMEXEC
70363 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
70364 + if (start != vma->vm_start) {
70365 + error = split_vma(mm, vma, start, 1);
70366 + if (error)
70367 + goto fail;
70368 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
70369 + *pprev = (*pprev)->vm_next;
70370 + }
70371 +
70372 + if (end != vma->vm_end) {
70373 + error = split_vma(mm, vma, end, 0);
70374 + if (error)
70375 + goto fail;
70376 + }
70377 +
70378 + if (pax_find_mirror_vma(vma)) {
70379 + error = __do_munmap(mm, start_m, end_m - start_m);
70380 + if (error)
70381 + goto fail;
70382 + } else {
70383 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70384 + if (!vma_m) {
70385 + error = -ENOMEM;
70386 + goto fail;
70387 + }
70388 + vma->vm_flags = newflags;
70389 + error = pax_mirror_vma(vma_m, vma);
70390 + if (error) {
70391 + vma->vm_flags = oldflags;
70392 + goto fail;
70393 + }
70394 + }
70395 + }
70396 +#endif
70397 +
70398 /*
70399 * First try to merge with previous and/or next vma.
70400 */
70401 @@ -204,9 +306,21 @@ success:
70402 * vm_flags and vm_page_prot are protected by the mmap_sem
70403 * held in write mode.
70404 */
70405 +
70406 +#ifdef CONFIG_PAX_SEGMEXEC
70407 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
70408 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
70409 +#endif
70410 +
70411 vma->vm_flags = newflags;
70412 +
70413 +#ifdef CONFIG_PAX_MPROTECT
70414 + if (mm->binfmt && mm->binfmt->handle_mprotect)
70415 + mm->binfmt->handle_mprotect(vma, newflags);
70416 +#endif
70417 +
70418 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
70419 - vm_get_page_prot(newflags));
70420 + vm_get_page_prot(vma->vm_flags));
70421
70422 if (vma_wants_writenotify(vma)) {
70423 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
70424 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70425 end = start + len;
70426 if (end <= start)
70427 return -ENOMEM;
70428 +
70429 +#ifdef CONFIG_PAX_SEGMEXEC
70430 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70431 + if (end > SEGMEXEC_TASK_SIZE)
70432 + return -EINVAL;
70433 + } else
70434 +#endif
70435 +
70436 + if (end > TASK_SIZE)
70437 + return -EINVAL;
70438 +
70439 if (!arch_validate_prot(prot))
70440 return -EINVAL;
70441
70442 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70443 /*
70444 * Does the application expect PROT_READ to imply PROT_EXEC:
70445 */
70446 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70447 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70448 prot |= PROT_EXEC;
70449
70450 vm_flags = calc_vm_prot_bits(prot);
70451 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70452 if (start > vma->vm_start)
70453 prev = vma;
70454
70455 +#ifdef CONFIG_PAX_MPROTECT
70456 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
70457 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
70458 +#endif
70459 +
70460 for (nstart = start ; ; ) {
70461 unsigned long newflags;
70462
70463 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70464
70465 /* newflags >> 4 shift VM_MAY% in place of VM_% */
70466 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
70467 + if (prot & (PROT_WRITE | PROT_EXEC))
70468 + gr_log_rwxmprotect(vma->vm_file);
70469 +
70470 + error = -EACCES;
70471 + goto out;
70472 + }
70473 +
70474 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
70475 error = -EACCES;
70476 goto out;
70477 }
70478 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70479 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70480 if (error)
70481 goto out;
70482 +
70483 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
70484 +
70485 nstart = tmp;
70486
70487 if (nstart < prev->vm_end)
70488 diff --git a/mm/mremap.c b/mm/mremap.c
70489 index d6959cb..18a402a 100644
70490 --- a/mm/mremap.c
70491 +++ b/mm/mremap.c
70492 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
70493 continue;
70494 pte = ptep_get_and_clear(mm, old_addr, old_pte);
70495 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70496 +
70497 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70498 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70499 + pte = pte_exprotect(pte);
70500 +#endif
70501 +
70502 set_pte_at(mm, new_addr, new_pte, pte);
70503 }
70504
70505 @@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
70506 if (is_vm_hugetlb_page(vma))
70507 goto Einval;
70508
70509 +#ifdef CONFIG_PAX_SEGMEXEC
70510 + if (pax_find_mirror_vma(vma))
70511 + goto Einval;
70512 +#endif
70513 +
70514 /* We can't remap across vm area boundaries */
70515 if (old_len > vma->vm_end - addr)
70516 goto Efault;
70517 @@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr,
70518 unsigned long ret = -EINVAL;
70519 unsigned long charged = 0;
70520 unsigned long map_flags;
70521 + unsigned long pax_task_size = TASK_SIZE;
70522
70523 if (new_addr & ~PAGE_MASK)
70524 goto out;
70525
70526 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70527 +#ifdef CONFIG_PAX_SEGMEXEC
70528 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70529 + pax_task_size = SEGMEXEC_TASK_SIZE;
70530 +#endif
70531 +
70532 + pax_task_size -= PAGE_SIZE;
70533 +
70534 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70535 goto out;
70536
70537 /* Check if the location we're moving into overlaps the
70538 * old location at all, and fail if it does.
70539 */
70540 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
70541 - goto out;
70542 -
70543 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
70544 + if (addr + old_len > new_addr && new_addr + new_len > addr)
70545 goto out;
70546
70547 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70548 @@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr,
70549 struct vm_area_struct *vma;
70550 unsigned long ret = -EINVAL;
70551 unsigned long charged = 0;
70552 + unsigned long pax_task_size = TASK_SIZE;
70553
70554 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70555 goto out;
70556 @@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr,
70557 if (!new_len)
70558 goto out;
70559
70560 +#ifdef CONFIG_PAX_SEGMEXEC
70561 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70562 + pax_task_size = SEGMEXEC_TASK_SIZE;
70563 +#endif
70564 +
70565 + pax_task_size -= PAGE_SIZE;
70566 +
70567 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70568 + old_len > pax_task_size || addr > pax_task_size-old_len)
70569 + goto out;
70570 +
70571 if (flags & MREMAP_FIXED) {
70572 if (flags & MREMAP_MAYMOVE)
70573 ret = mremap_to(addr, old_len, new_addr, new_len);
70574 @@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr,
70575 addr + new_len);
70576 }
70577 ret = addr;
70578 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70579 goto out;
70580 }
70581 }
70582 @@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr,
70583 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70584 if (ret)
70585 goto out;
70586 +
70587 + map_flags = vma->vm_flags;
70588 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70589 + if (!(ret & ~PAGE_MASK)) {
70590 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70591 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70592 + }
70593 }
70594 out:
70595 if (ret & ~PAGE_MASK)
70596 diff --git a/mm/nobootmem.c b/mm/nobootmem.c
70597 index 7fa41b4..6087460 100644
70598 --- a/mm/nobootmem.c
70599 +++ b/mm/nobootmem.c
70600 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
70601 unsigned long __init free_all_memory_core_early(int nodeid)
70602 {
70603 int i;
70604 - u64 start, end;
70605 + u64 start, end, startrange, endrange;
70606 unsigned long count = 0;
70607 - struct range *range = NULL;
70608 + struct range *range = NULL, rangerange = { 0, 0 };
70609 int nr_range;
70610
70611 nr_range = get_free_all_memory_range(&range, nodeid);
70612 + startrange = __pa(range) >> PAGE_SHIFT;
70613 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
70614
70615 for (i = 0; i < nr_range; i++) {
70616 start = range[i].start;
70617 end = range[i].end;
70618 + if (start <= endrange && startrange < end) {
70619 + BUG_ON(rangerange.start | rangerange.end);
70620 + rangerange = range[i];
70621 + continue;
70622 + }
70623 count += end - start;
70624 __free_pages_memory(start, end);
70625 }
70626 + start = rangerange.start;
70627 + end = rangerange.end;
70628 + count += end - start;
70629 + __free_pages_memory(start, end);
70630
70631 return count;
70632 }
70633 diff --git a/mm/nommu.c b/mm/nommu.c
70634 index b982290..7d73f53 100644
70635 --- a/mm/nommu.c
70636 +++ b/mm/nommu.c
70637 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
70638 int sysctl_overcommit_ratio = 50; /* default is 50% */
70639 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70640 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70641 -int heap_stack_gap = 0;
70642
70643 atomic_long_t mmap_pages_allocated;
70644
70645 @@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
70646 EXPORT_SYMBOL(find_vma);
70647
70648 /*
70649 - * find a VMA
70650 - * - we don't extend stack VMAs under NOMMU conditions
70651 - */
70652 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70653 -{
70654 - return find_vma(mm, addr);
70655 -}
70656 -
70657 -/*
70658 * expand a stack to a given address
70659 * - not supported under NOMMU conditions
70660 */
70661 @@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
70662
70663 /* most fields are the same, copy all, and then fixup */
70664 *new = *vma;
70665 + INIT_LIST_HEAD(&new->anon_vma_chain);
70666 *region = *vma->vm_region;
70667 new->vm_region = region;
70668
70669 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
70670 index 485be89..c059ad3 100644
70671 --- a/mm/page_alloc.c
70672 +++ b/mm/page_alloc.c
70673 @@ -341,7 +341,7 @@ out:
70674 * This usage means that zero-order pages may not be compound.
70675 */
70676
70677 -static void free_compound_page(struct page *page)
70678 +void free_compound_page(struct page *page)
70679 {
70680 __free_pages_ok(page, compound_order(page));
70681 }
70682 @@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70683 int i;
70684 int bad = 0;
70685
70686 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70687 + unsigned long index = 1UL << order;
70688 +#endif
70689 +
70690 trace_mm_page_free_direct(page, order);
70691 kmemcheck_free_shadow(page, order);
70692
70693 @@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70694 debug_check_no_obj_freed(page_address(page),
70695 PAGE_SIZE << order);
70696 }
70697 +
70698 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70699 + for (; index; --index)
70700 + sanitize_highpage(page + index - 1);
70701 +#endif
70702 +
70703 arch_free_page(page, order);
70704 kernel_map_pages(page, 1 << order, 0);
70705
70706 @@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
70707 arch_alloc_page(page, order);
70708 kernel_map_pages(page, 1 << order, 1);
70709
70710 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
70711 if (gfp_flags & __GFP_ZERO)
70712 prep_zero_page(page, order, gfp_flags);
70713 +#endif
70714
70715 if (order && (gfp_flags & __GFP_COMP))
70716 prep_compound_page(page, order);
70717 @@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
70718 unsigned long pfn;
70719
70720 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70721 +#ifdef CONFIG_X86_32
70722 + /* boot failures in VMware 8 on 32bit vanilla since
70723 + this change */
70724 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70725 +#else
70726 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70727 +#endif
70728 return 1;
70729 }
70730 return 0;
70731 diff --git a/mm/percpu.c b/mm/percpu.c
70732 index 716eb4a..8d10419 100644
70733 --- a/mm/percpu.c
70734 +++ b/mm/percpu.c
70735 @@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
70736 static unsigned int pcpu_high_unit_cpu __read_mostly;
70737
70738 /* the address of the first chunk which starts with the kernel static area */
70739 -void *pcpu_base_addr __read_mostly;
70740 +void *pcpu_base_addr __read_only;
70741 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70742
70743 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70744 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
70745 index e920aa3..137702a 100644
70746 --- a/mm/process_vm_access.c
70747 +++ b/mm/process_vm_access.c
70748 @@ -13,6 +13,7 @@
70749 #include <linux/uio.h>
70750 #include <linux/sched.h>
70751 #include <linux/highmem.h>
70752 +#include <linux/security.h>
70753 #include <linux/ptrace.h>
70754 #include <linux/slab.h>
70755 #include <linux/syscalls.h>
70756 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70757 size_t iov_l_curr_offset = 0;
70758 ssize_t iov_len;
70759
70760 + return -ENOSYS; // PaX: until properly audited
70761 +
70762 /*
70763 * Work out how many pages of struct pages we're going to need
70764 * when eventually calling get_user_pages
70765 */
70766 for (i = 0; i < riovcnt; i++) {
70767 iov_len = rvec[i].iov_len;
70768 - if (iov_len > 0) {
70769 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
70770 - + iov_len)
70771 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
70772 - / PAGE_SIZE + 1;
70773 - nr_pages = max(nr_pages, nr_pages_iov);
70774 - }
70775 + if (iov_len <= 0)
70776 + continue;
70777 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
70778 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
70779 + nr_pages = max(nr_pages, nr_pages_iov);
70780 }
70781
70782 if (nr_pages == 0)
70783 @@ -298,23 +299,23 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70784 goto free_proc_pages;
70785 }
70786
70787 - task_lock(task);
70788 - if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
70789 - task_unlock(task);
70790 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
70791 rc = -EPERM;
70792 goto put_task_struct;
70793 }
70794 - mm = task->mm;
70795
70796 - if (!mm || (task->flags & PF_KTHREAD)) {
70797 - task_unlock(task);
70798 - rc = -EINVAL;
70799 + mm = mm_access(task, PTRACE_MODE_ATTACH);
70800 + if (!mm || IS_ERR(mm)) {
70801 + rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
70802 + /*
70803 + * Explicitly map EACCES to EPERM as EPERM is a more a
70804 + * appropriate error code for process_vw_readv/writev
70805 + */
70806 + if (rc == -EACCES)
70807 + rc = -EPERM;
70808 goto put_task_struct;
70809 }
70810
70811 - atomic_inc(&mm->mm_users);
70812 - task_unlock(task);
70813 -
70814 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
70815 rc = process_vm_rw_single_vec(
70816 (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
70817 diff --git a/mm/rmap.c b/mm/rmap.c
70818 index a4fd368..e0ffec7 100644
70819 --- a/mm/rmap.c
70820 +++ b/mm/rmap.c
70821 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70822 struct anon_vma *anon_vma = vma->anon_vma;
70823 struct anon_vma_chain *avc;
70824
70825 +#ifdef CONFIG_PAX_SEGMEXEC
70826 + struct anon_vma_chain *avc_m = NULL;
70827 +#endif
70828 +
70829 might_sleep();
70830 if (unlikely(!anon_vma)) {
70831 struct mm_struct *mm = vma->vm_mm;
70832 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70833 if (!avc)
70834 goto out_enomem;
70835
70836 +#ifdef CONFIG_PAX_SEGMEXEC
70837 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70838 + if (!avc_m)
70839 + goto out_enomem_free_avc;
70840 +#endif
70841 +
70842 anon_vma = find_mergeable_anon_vma(vma);
70843 allocated = NULL;
70844 if (!anon_vma) {
70845 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70846 /* page_table_lock to protect against threads */
70847 spin_lock(&mm->page_table_lock);
70848 if (likely(!vma->anon_vma)) {
70849 +
70850 +#ifdef CONFIG_PAX_SEGMEXEC
70851 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70852 +
70853 + if (vma_m) {
70854 + BUG_ON(vma_m->anon_vma);
70855 + vma_m->anon_vma = anon_vma;
70856 + avc_m->anon_vma = anon_vma;
70857 + avc_m->vma = vma;
70858 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70859 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
70860 + avc_m = NULL;
70861 + }
70862 +#endif
70863 +
70864 vma->anon_vma = anon_vma;
70865 avc->anon_vma = anon_vma;
70866 avc->vma = vma;
70867 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70868
70869 if (unlikely(allocated))
70870 put_anon_vma(allocated);
70871 +
70872 +#ifdef CONFIG_PAX_SEGMEXEC
70873 + if (unlikely(avc_m))
70874 + anon_vma_chain_free(avc_m);
70875 +#endif
70876 +
70877 if (unlikely(avc))
70878 anon_vma_chain_free(avc);
70879 }
70880 return 0;
70881
70882 out_enomem_free_avc:
70883 +
70884 +#ifdef CONFIG_PAX_SEGMEXEC
70885 + if (avc_m)
70886 + anon_vma_chain_free(avc_m);
70887 +#endif
70888 +
70889 anon_vma_chain_free(avc);
70890 out_enomem:
70891 return -ENOMEM;
70892 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
70893 * Attach the anon_vmas from src to dst.
70894 * Returns 0 on success, -ENOMEM on failure.
70895 */
70896 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70897 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70898 {
70899 struct anon_vma_chain *avc, *pavc;
70900 struct anon_vma *root = NULL;
70901 @@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70902 * the corresponding VMA in the parent process is attached to.
70903 * Returns 0 on success, non-zero on failure.
70904 */
70905 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70906 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70907 {
70908 struct anon_vma_chain *avc;
70909 struct anon_vma *anon_vma;
70910 diff --git a/mm/shmem.c b/mm/shmem.c
70911 index 6c253f7..367e20a 100644
70912 --- a/mm/shmem.c
70913 +++ b/mm/shmem.c
70914 @@ -31,7 +31,7 @@
70915 #include <linux/export.h>
70916 #include <linux/swap.h>
70917
70918 -static struct vfsmount *shm_mnt;
70919 +struct vfsmount *shm_mnt;
70920
70921 #ifdef CONFIG_SHMEM
70922 /*
70923 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70924 #define BOGO_DIRENT_SIZE 20
70925
70926 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70927 -#define SHORT_SYMLINK_LEN 128
70928 +#define SHORT_SYMLINK_LEN 64
70929
70930 struct shmem_xattr {
70931 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70932 @@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
70933 int err = -ENOMEM;
70934
70935 /* Round up to L1_CACHE_BYTES to resist false sharing */
70936 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70937 - L1_CACHE_BYTES), GFP_KERNEL);
70938 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70939 if (!sbinfo)
70940 return -ENOMEM;
70941
70942 diff --git a/mm/slab.c b/mm/slab.c
70943 index 83311c9a..fcf8f86 100644
70944 --- a/mm/slab.c
70945 +++ b/mm/slab.c
70946 @@ -151,7 +151,7 @@
70947
70948 /* Legal flag mask for kmem_cache_create(). */
70949 #if DEBUG
70950 -# define CREATE_MASK (SLAB_RED_ZONE | \
70951 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70952 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70953 SLAB_CACHE_DMA | \
70954 SLAB_STORE_USER | \
70955 @@ -159,7 +159,7 @@
70956 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70957 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70958 #else
70959 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70960 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70961 SLAB_CACHE_DMA | \
70962 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70963 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70964 @@ -288,7 +288,7 @@ struct kmem_list3 {
70965 * Need this for bootstrapping a per node allocator.
70966 */
70967 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70968 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70969 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70970 #define CACHE_CACHE 0
70971 #define SIZE_AC MAX_NUMNODES
70972 #define SIZE_L3 (2 * MAX_NUMNODES)
70973 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
70974 if ((x)->max_freeable < i) \
70975 (x)->max_freeable = i; \
70976 } while (0)
70977 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70978 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70979 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70980 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70981 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70982 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70983 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70984 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70985 #else
70986 #define STATS_INC_ACTIVE(x) do { } while (0)
70987 #define STATS_DEC_ACTIVE(x) do { } while (0)
70988 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
70989 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70990 */
70991 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70992 - const struct slab *slab, void *obj)
70993 + const struct slab *slab, const void *obj)
70994 {
70995 u32 offset = (obj - slab->s_mem);
70996 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70997 @@ -564,7 +564,7 @@ struct cache_names {
70998 static struct cache_names __initdata cache_names[] = {
70999 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
71000 #include <linux/kmalloc_sizes.h>
71001 - {NULL,}
71002 + {NULL}
71003 #undef CACHE
71004 };
71005
71006 @@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
71007 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
71008 sizes[INDEX_AC].cs_size,
71009 ARCH_KMALLOC_MINALIGN,
71010 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
71011 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
71012 NULL);
71013
71014 if (INDEX_AC != INDEX_L3) {
71015 @@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
71016 kmem_cache_create(names[INDEX_L3].name,
71017 sizes[INDEX_L3].cs_size,
71018 ARCH_KMALLOC_MINALIGN,
71019 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
71020 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
71021 NULL);
71022 }
71023
71024 @@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
71025 sizes->cs_cachep = kmem_cache_create(names->name,
71026 sizes->cs_size,
71027 ARCH_KMALLOC_MINALIGN,
71028 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
71029 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
71030 NULL);
71031 }
71032 #ifdef CONFIG_ZONE_DMA
71033 @@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
71034 }
71035 /* cpu stats */
71036 {
71037 - unsigned long allochit = atomic_read(&cachep->allochit);
71038 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
71039 - unsigned long freehit = atomic_read(&cachep->freehit);
71040 - unsigned long freemiss = atomic_read(&cachep->freemiss);
71041 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
71042 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
71043 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
71044 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
71045
71046 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
71047 allochit, allocmiss, freehit, freemiss);
71048 @@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
71049 {
71050 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
71051 #ifdef CONFIG_DEBUG_SLAB_LEAK
71052 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
71053 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
71054 #endif
71055 return 0;
71056 }
71057 module_init(slab_proc_init);
71058 #endif
71059
71060 +void check_object_size(const void *ptr, unsigned long n, bool to)
71061 +{
71062 +
71063 +#ifdef CONFIG_PAX_USERCOPY
71064 + struct page *page;
71065 + struct kmem_cache *cachep = NULL;
71066 + struct slab *slabp;
71067 + unsigned int objnr;
71068 + unsigned long offset;
71069 + const char *type;
71070 +
71071 + if (!n)
71072 + return;
71073 +
71074 + type = "<null>";
71075 + if (ZERO_OR_NULL_PTR(ptr))
71076 + goto report;
71077 +
71078 + if (!virt_addr_valid(ptr))
71079 + return;
71080 +
71081 + page = virt_to_head_page(ptr);
71082 +
71083 + type = "<process stack>";
71084 + if (!PageSlab(page)) {
71085 + if (object_is_on_stack(ptr, n) == -1)
71086 + goto report;
71087 + return;
71088 + }
71089 +
71090 + cachep = page_get_cache(page);
71091 + type = cachep->name;
71092 + if (!(cachep->flags & SLAB_USERCOPY))
71093 + goto report;
71094 +
71095 + slabp = page_get_slab(page);
71096 + objnr = obj_to_index(cachep, slabp, ptr);
71097 + BUG_ON(objnr >= cachep->num);
71098 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
71099 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
71100 + return;
71101 +
71102 +report:
71103 + pax_report_usercopy(ptr, n, to, type);
71104 +#endif
71105 +
71106 +}
71107 +EXPORT_SYMBOL(check_object_size);
71108 +
71109 /**
71110 * ksize - get the actual amount of memory allocated for a given object
71111 * @objp: Pointer to the object
71112 diff --git a/mm/slob.c b/mm/slob.c
71113 index 8105be4..e045f96 100644
71114 --- a/mm/slob.c
71115 +++ b/mm/slob.c
71116 @@ -29,7 +29,7 @@
71117 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
71118 * alloc_pages() directly, allocating compound pages so the page order
71119 * does not have to be separately tracked, and also stores the exact
71120 - * allocation size in page->private so that it can be used to accurately
71121 + * allocation size in slob_page->size so that it can be used to accurately
71122 * provide ksize(). These objects are detected in kfree() because slob_page()
71123 * is false for them.
71124 *
71125 @@ -58,6 +58,7 @@
71126 */
71127
71128 #include <linux/kernel.h>
71129 +#include <linux/sched.h>
71130 #include <linux/slab.h>
71131 #include <linux/mm.h>
71132 #include <linux/swap.h> /* struct reclaim_state */
71133 @@ -102,7 +103,8 @@ struct slob_page {
71134 unsigned long flags; /* mandatory */
71135 atomic_t _count; /* mandatory */
71136 slobidx_t units; /* free units left in page */
71137 - unsigned long pad[2];
71138 + unsigned long pad[1];
71139 + unsigned long size; /* size when >=PAGE_SIZE */
71140 slob_t *free; /* first free slob_t in page */
71141 struct list_head list; /* linked list of free pages */
71142 };
71143 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
71144 */
71145 static inline int is_slob_page(struct slob_page *sp)
71146 {
71147 - return PageSlab((struct page *)sp);
71148 + return PageSlab((struct page *)sp) && !sp->size;
71149 }
71150
71151 static inline void set_slob_page(struct slob_page *sp)
71152 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
71153
71154 static inline struct slob_page *slob_page(const void *addr)
71155 {
71156 - return (struct slob_page *)virt_to_page(addr);
71157 + return (struct slob_page *)virt_to_head_page(addr);
71158 }
71159
71160 /*
71161 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
71162 /*
71163 * Return the size of a slob block.
71164 */
71165 -static slobidx_t slob_units(slob_t *s)
71166 +static slobidx_t slob_units(const slob_t *s)
71167 {
71168 if (s->units > 0)
71169 return s->units;
71170 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
71171 /*
71172 * Return the next free slob block pointer after this one.
71173 */
71174 -static slob_t *slob_next(slob_t *s)
71175 +static slob_t *slob_next(const slob_t *s)
71176 {
71177 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
71178 slobidx_t next;
71179 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
71180 /*
71181 * Returns true if s is the last free block in its page.
71182 */
71183 -static int slob_last(slob_t *s)
71184 +static int slob_last(const slob_t *s)
71185 {
71186 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
71187 }
71188 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
71189 if (!page)
71190 return NULL;
71191
71192 + set_slob_page(page);
71193 return page_address(page);
71194 }
71195
71196 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
71197 if (!b)
71198 return NULL;
71199 sp = slob_page(b);
71200 - set_slob_page(sp);
71201
71202 spin_lock_irqsave(&slob_lock, flags);
71203 sp->units = SLOB_UNITS(PAGE_SIZE);
71204 sp->free = b;
71205 + sp->size = 0;
71206 INIT_LIST_HEAD(&sp->list);
71207 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
71208 set_slob_page_free(sp, slob_list);
71209 @@ -476,10 +479,9 @@ out:
71210 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
71211 */
71212
71213 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71214 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
71215 {
71216 - unsigned int *m;
71217 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71218 + slob_t *m;
71219 void *ret;
71220
71221 gfp &= gfp_allowed_mask;
71222 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71223
71224 if (!m)
71225 return NULL;
71226 - *m = size;
71227 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
71228 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
71229 + m[0].units = size;
71230 + m[1].units = align;
71231 ret = (void *)m + align;
71232
71233 trace_kmalloc_node(_RET_IP_, ret,
71234 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71235 gfp |= __GFP_COMP;
71236 ret = slob_new_pages(gfp, order, node);
71237 if (ret) {
71238 - struct page *page;
71239 - page = virt_to_page(ret);
71240 - page->private = size;
71241 + struct slob_page *sp;
71242 + sp = slob_page(ret);
71243 + sp->size = size;
71244 }
71245
71246 trace_kmalloc_node(_RET_IP_, ret,
71247 size, PAGE_SIZE << order, gfp, node);
71248 }
71249
71250 - kmemleak_alloc(ret, size, 1, gfp);
71251 + return ret;
71252 +}
71253 +
71254 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71255 +{
71256 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71257 + void *ret = __kmalloc_node_align(size, gfp, node, align);
71258 +
71259 + if (!ZERO_OR_NULL_PTR(ret))
71260 + kmemleak_alloc(ret, size, 1, gfp);
71261 return ret;
71262 }
71263 EXPORT_SYMBOL(__kmalloc_node);
71264 @@ -533,13 +547,92 @@ void kfree(const void *block)
71265 sp = slob_page(block);
71266 if (is_slob_page(sp)) {
71267 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71268 - unsigned int *m = (unsigned int *)(block - align);
71269 - slob_free(m, *m + align);
71270 - } else
71271 + slob_t *m = (slob_t *)(block - align);
71272 + slob_free(m, m[0].units + align);
71273 + } else {
71274 + clear_slob_page(sp);
71275 + free_slob_page(sp);
71276 + sp->size = 0;
71277 put_page(&sp->page);
71278 + }
71279 }
71280 EXPORT_SYMBOL(kfree);
71281
71282 +void check_object_size(const void *ptr, unsigned long n, bool to)
71283 +{
71284 +
71285 +#ifdef CONFIG_PAX_USERCOPY
71286 + struct slob_page *sp;
71287 + const slob_t *free;
71288 + const void *base;
71289 + unsigned long flags;
71290 + const char *type;
71291 +
71292 + if (!n)
71293 + return;
71294 +
71295 + type = "<null>";
71296 + if (ZERO_OR_NULL_PTR(ptr))
71297 + goto report;
71298 +
71299 + if (!virt_addr_valid(ptr))
71300 + return;
71301 +
71302 + type = "<process stack>";
71303 + sp = slob_page(ptr);
71304 + if (!PageSlab((struct page *)sp)) {
71305 + if (object_is_on_stack(ptr, n) == -1)
71306 + goto report;
71307 + return;
71308 + }
71309 +
71310 + type = "<slob>";
71311 + if (sp->size) {
71312 + base = page_address(&sp->page);
71313 + if (base <= ptr && n <= sp->size - (ptr - base))
71314 + return;
71315 + goto report;
71316 + }
71317 +
71318 + /* some tricky double walking to find the chunk */
71319 + spin_lock_irqsave(&slob_lock, flags);
71320 + base = (void *)((unsigned long)ptr & PAGE_MASK);
71321 + free = sp->free;
71322 +
71323 + while (!slob_last(free) && (void *)free <= ptr) {
71324 + base = free + slob_units(free);
71325 + free = slob_next(free);
71326 + }
71327 +
71328 + while (base < (void *)free) {
71329 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
71330 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
71331 + int offset;
71332 +
71333 + if (ptr < base + align)
71334 + break;
71335 +
71336 + offset = ptr - base - align;
71337 + if (offset >= m) {
71338 + base += size;
71339 + continue;
71340 + }
71341 +
71342 + if (n > m - offset)
71343 + break;
71344 +
71345 + spin_unlock_irqrestore(&slob_lock, flags);
71346 + return;
71347 + }
71348 +
71349 + spin_unlock_irqrestore(&slob_lock, flags);
71350 +report:
71351 + pax_report_usercopy(ptr, n, to, type);
71352 +#endif
71353 +
71354 +}
71355 +EXPORT_SYMBOL(check_object_size);
71356 +
71357 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
71358 size_t ksize(const void *block)
71359 {
71360 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
71361 sp = slob_page(block);
71362 if (is_slob_page(sp)) {
71363 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71364 - unsigned int *m = (unsigned int *)(block - align);
71365 - return SLOB_UNITS(*m) * SLOB_UNIT;
71366 + slob_t *m = (slob_t *)(block - align);
71367 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
71368 } else
71369 - return sp->page.private;
71370 + return sp->size;
71371 }
71372 EXPORT_SYMBOL(ksize);
71373
71374 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71375 {
71376 struct kmem_cache *c;
71377
71378 +#ifdef CONFIG_PAX_USERCOPY
71379 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
71380 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
71381 +#else
71382 c = slob_alloc(sizeof(struct kmem_cache),
71383 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
71384 +#endif
71385
71386 if (c) {
71387 c->name = name;
71388 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
71389
71390 lockdep_trace_alloc(flags);
71391
71392 +#ifdef CONFIG_PAX_USERCOPY
71393 + b = __kmalloc_node_align(c->size, flags, node, c->align);
71394 +#else
71395 if (c->size < PAGE_SIZE) {
71396 b = slob_alloc(c->size, flags, c->align, node);
71397 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71398 SLOB_UNITS(c->size) * SLOB_UNIT,
71399 flags, node);
71400 } else {
71401 + struct slob_page *sp;
71402 +
71403 b = slob_new_pages(flags, get_order(c->size), node);
71404 + sp = slob_page(b);
71405 + sp->size = c->size;
71406 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71407 PAGE_SIZE << get_order(c->size),
71408 flags, node);
71409 }
71410 +#endif
71411
71412 if (c->ctor)
71413 c->ctor(b);
71414 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
71415
71416 static void __kmem_cache_free(void *b, int size)
71417 {
71418 - if (size < PAGE_SIZE)
71419 + struct slob_page *sp = slob_page(b);
71420 +
71421 + if (is_slob_page(sp))
71422 slob_free(b, size);
71423 - else
71424 + else {
71425 + clear_slob_page(sp);
71426 + free_slob_page(sp);
71427 + sp->size = 0;
71428 slob_free_pages(b, get_order(size));
71429 + }
71430 }
71431
71432 static void kmem_rcu_free(struct rcu_head *head)
71433 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
71434
71435 void kmem_cache_free(struct kmem_cache *c, void *b)
71436 {
71437 + int size = c->size;
71438 +
71439 +#ifdef CONFIG_PAX_USERCOPY
71440 + if (size + c->align < PAGE_SIZE) {
71441 + size += c->align;
71442 + b -= c->align;
71443 + }
71444 +#endif
71445 +
71446 kmemleak_free_recursive(b, c->flags);
71447 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
71448 struct slob_rcu *slob_rcu;
71449 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
71450 - slob_rcu->size = c->size;
71451 + slob_rcu = b + (size - sizeof(struct slob_rcu));
71452 + slob_rcu->size = size;
71453 call_rcu(&slob_rcu->head, kmem_rcu_free);
71454 } else {
71455 - __kmem_cache_free(b, c->size);
71456 + __kmem_cache_free(b, size);
71457 }
71458
71459 +#ifdef CONFIG_PAX_USERCOPY
71460 + trace_kfree(_RET_IP_, b);
71461 +#else
71462 trace_kmem_cache_free(_RET_IP_, b);
71463 +#endif
71464 +
71465 }
71466 EXPORT_SYMBOL(kmem_cache_free);
71467
71468 diff --git a/mm/slub.c b/mm/slub.c
71469 index 1a919f0..1739c9b 100644
71470 --- a/mm/slub.c
71471 +++ b/mm/slub.c
71472 @@ -208,7 +208,7 @@ struct track {
71473
71474 enum track_item { TRACK_ALLOC, TRACK_FREE };
71475
71476 -#ifdef CONFIG_SYSFS
71477 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71478 static int sysfs_slab_add(struct kmem_cache *);
71479 static int sysfs_slab_alias(struct kmem_cache *, const char *);
71480 static void sysfs_slab_remove(struct kmem_cache *);
71481 @@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
71482 if (!t->addr)
71483 return;
71484
71485 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
71486 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
71487 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
71488 #ifdef CONFIG_STACKTRACE
71489 {
71490 @@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
71491
71492 page = virt_to_head_page(x);
71493
71494 + BUG_ON(!PageSlab(page));
71495 +
71496 slab_free(s, page, x, _RET_IP_);
71497
71498 trace_kmem_cache_free(_RET_IP_, x);
71499 @@ -2592,7 +2594,7 @@ static int slub_min_objects;
71500 * Merge control. If this is set then no merging of slab caches will occur.
71501 * (Could be removed. This was introduced to pacify the merge skeptics.)
71502 */
71503 -static int slub_nomerge;
71504 +static int slub_nomerge = 1;
71505
71506 /*
71507 * Calculate the order of allocation given an slab object size.
71508 @@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
71509 else
71510 s->cpu_partial = 30;
71511
71512 - s->refcount = 1;
71513 + atomic_set(&s->refcount, 1);
71514 #ifdef CONFIG_NUMA
71515 s->remote_node_defrag_ratio = 1000;
71516 #endif
71517 @@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
71518 void kmem_cache_destroy(struct kmem_cache *s)
71519 {
71520 down_write(&slub_lock);
71521 - s->refcount--;
71522 - if (!s->refcount) {
71523 + if (atomic_dec_and_test(&s->refcount)) {
71524 list_del(&s->list);
71525 up_write(&slub_lock);
71526 if (kmem_cache_close(s)) {
71527 @@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
71528 EXPORT_SYMBOL(__kmalloc_node);
71529 #endif
71530
71531 +void check_object_size(const void *ptr, unsigned long n, bool to)
71532 +{
71533 +
71534 +#ifdef CONFIG_PAX_USERCOPY
71535 + struct page *page;
71536 + struct kmem_cache *s = NULL;
71537 + unsigned long offset;
71538 + const char *type;
71539 +
71540 + if (!n)
71541 + return;
71542 +
71543 + type = "<null>";
71544 + if (ZERO_OR_NULL_PTR(ptr))
71545 + goto report;
71546 +
71547 + if (!virt_addr_valid(ptr))
71548 + return;
71549 +
71550 + page = virt_to_head_page(ptr);
71551 +
71552 + type = "<process stack>";
71553 + if (!PageSlab(page)) {
71554 + if (object_is_on_stack(ptr, n) == -1)
71555 + goto report;
71556 + return;
71557 + }
71558 +
71559 + s = page->slab;
71560 + type = s->name;
71561 + if (!(s->flags & SLAB_USERCOPY))
71562 + goto report;
71563 +
71564 + offset = (ptr - page_address(page)) % s->size;
71565 + if (offset <= s->objsize && n <= s->objsize - offset)
71566 + return;
71567 +
71568 +report:
71569 + pax_report_usercopy(ptr, n, to, type);
71570 +#endif
71571 +
71572 +}
71573 +EXPORT_SYMBOL(check_object_size);
71574 +
71575 size_t ksize(const void *object)
71576 {
71577 struct page *page;
71578 @@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
71579 int node;
71580
71581 list_add(&s->list, &slab_caches);
71582 - s->refcount = -1;
71583 + atomic_set(&s->refcount, -1);
71584
71585 for_each_node_state(node, N_NORMAL_MEMORY) {
71586 struct kmem_cache_node *n = get_node(s, node);
71587 @@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void)
71588
71589 /* Caches that are not of the two-to-the-power-of size */
71590 if (KMALLOC_MIN_SIZE <= 32) {
71591 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
71592 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
71593 caches++;
71594 }
71595
71596 if (KMALLOC_MIN_SIZE <= 64) {
71597 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
71598 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
71599 caches++;
71600 }
71601
71602 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71603 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
71604 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
71605 caches++;
71606 }
71607
71608 @@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s)
71609 /*
71610 * We may have set a slab to be unmergeable during bootstrap.
71611 */
71612 - if (s->refcount < 0)
71613 + if (atomic_read(&s->refcount) < 0)
71614 return 1;
71615
71616 return 0;
71617 @@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71618 down_write(&slub_lock);
71619 s = find_mergeable(size, align, flags, name, ctor);
71620 if (s) {
71621 - s->refcount++;
71622 + atomic_inc(&s->refcount);
71623 /*
71624 * Adjust the object sizes so that we clear
71625 * the complete object on kzalloc.
71626 @@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71627 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
71628
71629 if (sysfs_slab_alias(s, name)) {
71630 - s->refcount--;
71631 + atomic_dec(&s->refcount);
71632 goto err;
71633 }
71634 up_write(&slub_lock);
71635 @@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
71636 }
71637 #endif
71638
71639 -#ifdef CONFIG_SYSFS
71640 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71641 static int count_inuse(struct page *page)
71642 {
71643 return page->inuse;
71644 @@ -4410,12 +4455,12 @@ static void resiliency_test(void)
71645 validate_slab_cache(kmalloc_caches[9]);
71646 }
71647 #else
71648 -#ifdef CONFIG_SYSFS
71649 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71650 static void resiliency_test(void) {};
71651 #endif
71652 #endif
71653
71654 -#ifdef CONFIG_SYSFS
71655 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71656 enum slab_stat_type {
71657 SL_ALL, /* All slabs */
71658 SL_PARTIAL, /* Only partially allocated slabs */
71659 @@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor);
71660
71661 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71662 {
71663 - return sprintf(buf, "%d\n", s->refcount - 1);
71664 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71665 }
71666 SLAB_ATTR_RO(aliases);
71667
71668 @@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s)
71669 return name;
71670 }
71671
71672 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71673 static int sysfs_slab_add(struct kmem_cache *s)
71674 {
71675 int err;
71676 @@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
71677 kobject_del(&s->kobj);
71678 kobject_put(&s->kobj);
71679 }
71680 +#endif
71681
71682 /*
71683 * Need to buffer aliases during bootup until sysfs becomes
71684 @@ -5298,6 +5345,7 @@ struct saved_alias {
71685
71686 static struct saved_alias *alias_list;
71687
71688 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71689 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71690 {
71691 struct saved_alias *al;
71692 @@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71693 alias_list = al;
71694 return 0;
71695 }
71696 +#endif
71697
71698 static int __init slab_sysfs_init(void)
71699 {
71700 diff --git a/mm/swap.c b/mm/swap.c
71701 index 55b266d..a532537 100644
71702 --- a/mm/swap.c
71703 +++ b/mm/swap.c
71704 @@ -31,6 +31,7 @@
71705 #include <linux/backing-dev.h>
71706 #include <linux/memcontrol.h>
71707 #include <linux/gfp.h>
71708 +#include <linux/hugetlb.h>
71709
71710 #include "internal.h"
71711
71712 @@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
71713
71714 __page_cache_release(page);
71715 dtor = get_compound_page_dtor(page);
71716 + if (!PageHuge(page))
71717 + BUG_ON(dtor != free_compound_page);
71718 (*dtor)(page);
71719 }
71720
71721 diff --git a/mm/swapfile.c b/mm/swapfile.c
71722 index b1cd120..aaae885 100644
71723 --- a/mm/swapfile.c
71724 +++ b/mm/swapfile.c
71725 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
71726
71727 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
71728 /* Activity counter to indicate that a swapon or swapoff has occurred */
71729 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
71730 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
71731
71732 static inline unsigned char swap_count(unsigned char ent)
71733 {
71734 @@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
71735 }
71736 filp_close(swap_file, NULL);
71737 err = 0;
71738 - atomic_inc(&proc_poll_event);
71739 + atomic_inc_unchecked(&proc_poll_event);
71740 wake_up_interruptible(&proc_poll_wait);
71741
71742 out_dput:
71743 @@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
71744
71745 poll_wait(file, &proc_poll_wait, wait);
71746
71747 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
71748 - seq->poll_event = atomic_read(&proc_poll_event);
71749 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
71750 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71751 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71752 }
71753
71754 @@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file)
71755 return ret;
71756
71757 seq = file->private_data;
71758 - seq->poll_event = atomic_read(&proc_poll_event);
71759 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71760 return 0;
71761 }
71762
71763 @@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
71764 (p->flags & SWP_DISCARDABLE) ? "D" : "");
71765
71766 mutex_unlock(&swapon_mutex);
71767 - atomic_inc(&proc_poll_event);
71768 + atomic_inc_unchecked(&proc_poll_event);
71769 wake_up_interruptible(&proc_poll_wait);
71770
71771 if (S_ISREG(inode->i_mode))
71772 diff --git a/mm/util.c b/mm/util.c
71773 index 136ac4f..5117eef 100644
71774 --- a/mm/util.c
71775 +++ b/mm/util.c
71776 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71777 * allocated buffer. Use this if you don't want to free the buffer immediately
71778 * like, for example, with RCU.
71779 */
71780 +#undef __krealloc
71781 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71782 {
71783 void *ret;
71784 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71785 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71786 * %NULL pointer, the object pointed to is freed.
71787 */
71788 +#undef krealloc
71789 void *krealloc(const void *p, size_t new_size, gfp_t flags)
71790 {
71791 void *ret;
71792 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
71793 void arch_pick_mmap_layout(struct mm_struct *mm)
71794 {
71795 mm->mmap_base = TASK_UNMAPPED_BASE;
71796 +
71797 +#ifdef CONFIG_PAX_RANDMMAP
71798 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71799 + mm->mmap_base += mm->delta_mmap;
71800 +#endif
71801 +
71802 mm->get_unmapped_area = arch_get_unmapped_area;
71803 mm->unmap_area = arch_unmap_area;
71804 }
71805 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
71806 index 27be2f0..0aef2c2 100644
71807 --- a/mm/vmalloc.c
71808 +++ b/mm/vmalloc.c
71809 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
71810
71811 pte = pte_offset_kernel(pmd, addr);
71812 do {
71813 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71814 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71815 +
71816 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71817 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71818 + BUG_ON(!pte_exec(*pte));
71819 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71820 + continue;
71821 + }
71822 +#endif
71823 +
71824 + {
71825 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71826 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71827 + }
71828 } while (pte++, addr += PAGE_SIZE, addr != end);
71829 }
71830
71831 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71832 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71833 {
71834 pte_t *pte;
71835 + int ret = -ENOMEM;
71836
71837 /*
71838 * nr is a running index into the array which helps higher level
71839 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71840 pte = pte_alloc_kernel(pmd, addr);
71841 if (!pte)
71842 return -ENOMEM;
71843 +
71844 + pax_open_kernel();
71845 do {
71846 struct page *page = pages[*nr];
71847
71848 - if (WARN_ON(!pte_none(*pte)))
71849 - return -EBUSY;
71850 - if (WARN_ON(!page))
71851 - return -ENOMEM;
71852 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71853 + if (pgprot_val(prot) & _PAGE_NX)
71854 +#endif
71855 +
71856 + if (WARN_ON(!pte_none(*pte))) {
71857 + ret = -EBUSY;
71858 + goto out;
71859 + }
71860 + if (WARN_ON(!page)) {
71861 + ret = -ENOMEM;
71862 + goto out;
71863 + }
71864 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71865 (*nr)++;
71866 } while (pte++, addr += PAGE_SIZE, addr != end);
71867 - return 0;
71868 + ret = 0;
71869 +out:
71870 + pax_close_kernel();
71871 + return ret;
71872 }
71873
71874 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71875 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
71876 * and fall back on vmalloc() if that fails. Others
71877 * just put it in the vmalloc space.
71878 */
71879 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71880 +#ifdef CONFIG_MODULES
71881 +#ifdef MODULES_VADDR
71882 unsigned long addr = (unsigned long)x;
71883 if (addr >= MODULES_VADDR && addr < MODULES_END)
71884 return 1;
71885 #endif
71886 +
71887 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71888 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71889 + return 1;
71890 +#endif
71891 +
71892 +#endif
71893 +
71894 return is_vmalloc_addr(x);
71895 }
71896
71897 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
71898
71899 if (!pgd_none(*pgd)) {
71900 pud_t *pud = pud_offset(pgd, addr);
71901 +#ifdef CONFIG_X86
71902 + if (!pud_large(*pud))
71903 +#endif
71904 if (!pud_none(*pud)) {
71905 pmd_t *pmd = pmd_offset(pud, addr);
71906 +#ifdef CONFIG_X86
71907 + if (!pmd_large(*pmd))
71908 +#endif
71909 if (!pmd_none(*pmd)) {
71910 pte_t *ptep, pte;
71911
71912 @@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
71913 struct vm_struct *area;
71914
71915 BUG_ON(in_interrupt());
71916 +
71917 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71918 + if (flags & VM_KERNEXEC) {
71919 + if (start != VMALLOC_START || end != VMALLOC_END)
71920 + return NULL;
71921 + start = (unsigned long)MODULES_EXEC_VADDR;
71922 + end = (unsigned long)MODULES_EXEC_END;
71923 + }
71924 +#endif
71925 +
71926 if (flags & VM_IOREMAP) {
71927 int bit = fls(size);
71928
71929 @@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
71930 if (count > totalram_pages)
71931 return NULL;
71932
71933 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71934 + if (!(pgprot_val(prot) & _PAGE_NX))
71935 + flags |= VM_KERNEXEC;
71936 +#endif
71937 +
71938 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71939 __builtin_return_address(0));
71940 if (!area)
71941 @@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
71942 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71943 goto fail;
71944
71945 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71946 + if (!(pgprot_val(prot) & _PAGE_NX))
71947 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71948 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71949 + else
71950 +#endif
71951 +
71952 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71953 start, end, node, gfp_mask, caller);
71954 if (!area)
71955 @@ -1679,6 +1741,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
71956 gfp_mask, prot, node, caller);
71957 }
71958
71959 +#undef __vmalloc
71960 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71961 {
71962 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71963 @@ -1702,6 +1765,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
71964 * For tight control over page level allocator and protection flags
71965 * use __vmalloc() instead.
71966 */
71967 +#undef vmalloc
71968 void *vmalloc(unsigned long size)
71969 {
71970 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71971 @@ -1718,6 +1782,7 @@ EXPORT_SYMBOL(vmalloc);
71972 * For tight control over page level allocator and protection flags
71973 * use __vmalloc() instead.
71974 */
71975 +#undef vzalloc
71976 void *vzalloc(unsigned long size)
71977 {
71978 return __vmalloc_node_flags(size, -1,
71979 @@ -1732,6 +1797,7 @@ EXPORT_SYMBOL(vzalloc);
71980 * The resulting memory area is zeroed so it can be mapped to userspace
71981 * without leaking data.
71982 */
71983 +#undef vmalloc_user
71984 void *vmalloc_user(unsigned long size)
71985 {
71986 struct vm_struct *area;
71987 @@ -1759,6 +1825,7 @@ EXPORT_SYMBOL(vmalloc_user);
71988 * For tight control over page level allocator and protection flags
71989 * use __vmalloc() instead.
71990 */
71991 +#undef vmalloc_node
71992 void *vmalloc_node(unsigned long size, int node)
71993 {
71994 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71995 @@ -1778,6 +1845,7 @@ EXPORT_SYMBOL(vmalloc_node);
71996 * For tight control over page level allocator and protection flags
71997 * use __vmalloc_node() instead.
71998 */
71999 +#undef vzalloc_node
72000 void *vzalloc_node(unsigned long size, int node)
72001 {
72002 return __vmalloc_node_flags(size, node,
72003 @@ -1800,10 +1868,10 @@ EXPORT_SYMBOL(vzalloc_node);
72004 * For tight control over page level allocator and protection flags
72005 * use __vmalloc() instead.
72006 */
72007 -
72008 +#undef vmalloc_exec
72009 void *vmalloc_exec(unsigned long size)
72010 {
72011 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
72012 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
72013 -1, __builtin_return_address(0));
72014 }
72015
72016 @@ -1822,6 +1890,7 @@ void *vmalloc_exec(unsigned long size)
72017 * Allocate enough 32bit PA addressable pages to cover @size from the
72018 * page level allocator and map them into contiguous kernel virtual space.
72019 */
72020 +#undef vmalloc_32
72021 void *vmalloc_32(unsigned long size)
72022 {
72023 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
72024 @@ -1836,6 +1905,7 @@ EXPORT_SYMBOL(vmalloc_32);
72025 * The resulting memory area is 32bit addressable and zeroed so it can be
72026 * mapped to userspace without leaking data.
72027 */
72028 +#undef vmalloc_32_user
72029 void *vmalloc_32_user(unsigned long size)
72030 {
72031 struct vm_struct *area;
72032 @@ -2098,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
72033 unsigned long uaddr = vma->vm_start;
72034 unsigned long usize = vma->vm_end - vma->vm_start;
72035
72036 + BUG_ON(vma->vm_mirror);
72037 +
72038 if ((PAGE_SIZE-1) & (unsigned long)addr)
72039 return -EINVAL;
72040
72041 diff --git a/mm/vmstat.c b/mm/vmstat.c
72042 index 8fd603b..cf0d930 100644
72043 --- a/mm/vmstat.c
72044 +++ b/mm/vmstat.c
72045 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
72046 *
72047 * vm_stat contains the global counters
72048 */
72049 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
72050 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
72051 EXPORT_SYMBOL(vm_stat);
72052
72053 #ifdef CONFIG_SMP
72054 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
72055 v = p->vm_stat_diff[i];
72056 p->vm_stat_diff[i] = 0;
72057 local_irq_restore(flags);
72058 - atomic_long_add(v, &zone->vm_stat[i]);
72059 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
72060 global_diff[i] += v;
72061 #ifdef CONFIG_NUMA
72062 /* 3 seconds idle till flush */
72063 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
72064
72065 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
72066 if (global_diff[i])
72067 - atomic_long_add(global_diff[i], &vm_stat[i]);
72068 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
72069 }
72070
72071 #endif
72072 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
72073 start_cpu_timer(cpu);
72074 #endif
72075 #ifdef CONFIG_PROC_FS
72076 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
72077 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
72078 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
72079 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
72080 + {
72081 + mode_t gr_mode = S_IRUGO;
72082 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
72083 + gr_mode = S_IRUSR;
72084 +#endif
72085 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
72086 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
72087 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72088 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
72089 +#else
72090 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
72091 +#endif
72092 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
72093 + }
72094 #endif
72095 return 0;
72096 }
72097 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
72098 index 5471628..cef8398 100644
72099 --- a/net/8021q/vlan.c
72100 +++ b/net/8021q/vlan.c
72101 @@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
72102 err = -EPERM;
72103 if (!capable(CAP_NET_ADMIN))
72104 break;
72105 - if ((args.u.name_type >= 0) &&
72106 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
72107 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
72108 struct vlan_net *vn;
72109
72110 vn = net_generic(net, vlan_net_id);
72111 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
72112 index fdfdb57..38d368c 100644
72113 --- a/net/9p/trans_fd.c
72114 +++ b/net/9p/trans_fd.c
72115 @@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
72116 oldfs = get_fs();
72117 set_fs(get_ds());
72118 /* The cast to a user pointer is valid due to the set_fs() */
72119 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
72120 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
72121 set_fs(oldfs);
72122
72123 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
72124 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
72125 index f41f026..fe76ea8 100644
72126 --- a/net/atm/atm_misc.c
72127 +++ b/net/atm/atm_misc.c
72128 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
72129 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
72130 return 1;
72131 atm_return(vcc, truesize);
72132 - atomic_inc(&vcc->stats->rx_drop);
72133 + atomic_inc_unchecked(&vcc->stats->rx_drop);
72134 return 0;
72135 }
72136 EXPORT_SYMBOL(atm_charge);
72137 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
72138 }
72139 }
72140 atm_return(vcc, guess);
72141 - atomic_inc(&vcc->stats->rx_drop);
72142 + atomic_inc_unchecked(&vcc->stats->rx_drop);
72143 return NULL;
72144 }
72145 EXPORT_SYMBOL(atm_alloc_charge);
72146 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
72147
72148 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
72149 {
72150 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
72151 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
72152 __SONET_ITEMS
72153 #undef __HANDLE_ITEM
72154 }
72155 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
72156
72157 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
72158 {
72159 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
72160 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
72161 __SONET_ITEMS
72162 #undef __HANDLE_ITEM
72163 }
72164 diff --git a/net/atm/lec.h b/net/atm/lec.h
72165 index dfc0719..47c5322 100644
72166 --- a/net/atm/lec.h
72167 +++ b/net/atm/lec.h
72168 @@ -48,7 +48,7 @@ struct lane2_ops {
72169 const u8 *tlvs, u32 sizeoftlvs);
72170 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
72171 const u8 *tlvs, u32 sizeoftlvs);
72172 -};
72173 +} __no_const;
72174
72175 /*
72176 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
72177 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
72178 index 0919a88..a23d54e 100644
72179 --- a/net/atm/mpc.h
72180 +++ b/net/atm/mpc.h
72181 @@ -33,7 +33,7 @@ struct mpoa_client {
72182 struct mpc_parameters parameters; /* parameters for this client */
72183
72184 const struct net_device_ops *old_ops;
72185 - struct net_device_ops new_ops;
72186 + net_device_ops_no_const new_ops;
72187 };
72188
72189
72190 diff --git a/net/atm/proc.c b/net/atm/proc.c
72191 index 0d020de..011c7bb 100644
72192 --- a/net/atm/proc.c
72193 +++ b/net/atm/proc.c
72194 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
72195 const struct k_atm_aal_stats *stats)
72196 {
72197 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
72198 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
72199 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
72200 - atomic_read(&stats->rx_drop));
72201 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
72202 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
72203 + atomic_read_unchecked(&stats->rx_drop));
72204 }
72205
72206 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
72207 diff --git a/net/atm/resources.c b/net/atm/resources.c
72208 index 23f45ce..c748f1a 100644
72209 --- a/net/atm/resources.c
72210 +++ b/net/atm/resources.c
72211 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
72212 static void copy_aal_stats(struct k_atm_aal_stats *from,
72213 struct atm_aal_stats *to)
72214 {
72215 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
72216 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
72217 __AAL_STAT_ITEMS
72218 #undef __HANDLE_ITEM
72219 }
72220 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
72221 static void subtract_aal_stats(struct k_atm_aal_stats *from,
72222 struct atm_aal_stats *to)
72223 {
72224 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
72225 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
72226 __AAL_STAT_ITEMS
72227 #undef __HANDLE_ITEM
72228 }
72229 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
72230 index 3512e25..2b33401 100644
72231 --- a/net/batman-adv/bat_iv_ogm.c
72232 +++ b/net/batman-adv/bat_iv_ogm.c
72233 @@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
72234
72235 /* change sequence number to network order */
72236 batman_ogm_packet->seqno =
72237 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
72238 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
72239
72240 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
72241 batman_ogm_packet->tt_crc = htons((uint16_t)
72242 @@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
72243 else
72244 batman_ogm_packet->gw_flags = NO_FLAGS;
72245
72246 - atomic_inc(&hard_iface->seqno);
72247 + atomic_inc_unchecked(&hard_iface->seqno);
72248
72249 slide_own_bcast_window(hard_iface);
72250 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
72251 @@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
72252 return;
72253
72254 /* could be changed by schedule_own_packet() */
72255 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
72256 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
72257
72258 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
72259
72260 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
72261 index 7704df4..beb4e16 100644
72262 --- a/net/batman-adv/hard-interface.c
72263 +++ b/net/batman-adv/hard-interface.c
72264 @@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
72265 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
72266 dev_add_pack(&hard_iface->batman_adv_ptype);
72267
72268 - atomic_set(&hard_iface->seqno, 1);
72269 - atomic_set(&hard_iface->frag_seqno, 1);
72270 + atomic_set_unchecked(&hard_iface->seqno, 1);
72271 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
72272 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
72273 hard_iface->net_dev->name);
72274
72275 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
72276 index f9cc957..efd9dae 100644
72277 --- a/net/batman-adv/soft-interface.c
72278 +++ b/net/batman-adv/soft-interface.c
72279 @@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
72280
72281 /* set broadcast sequence number */
72282 bcast_packet->seqno =
72283 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
72284 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
72285
72286 add_bcast_packet_to_list(bat_priv, skb, 1);
72287
72288 @@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
72289 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
72290
72291 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
72292 - atomic_set(&bat_priv->bcast_seqno, 1);
72293 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
72294 atomic_set(&bat_priv->ttvn, 0);
72295 atomic_set(&bat_priv->tt_local_changes, 0);
72296 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
72297 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
72298 index ab8d0fe..ceba3fd 100644
72299 --- a/net/batman-adv/types.h
72300 +++ b/net/batman-adv/types.h
72301 @@ -38,8 +38,8 @@ struct hard_iface {
72302 int16_t if_num;
72303 char if_status;
72304 struct net_device *net_dev;
72305 - atomic_t seqno;
72306 - atomic_t frag_seqno;
72307 + atomic_unchecked_t seqno;
72308 + atomic_unchecked_t frag_seqno;
72309 unsigned char *packet_buff;
72310 int packet_len;
72311 struct kobject *hardif_obj;
72312 @@ -154,7 +154,7 @@ struct bat_priv {
72313 atomic_t orig_interval; /* uint */
72314 atomic_t hop_penalty; /* uint */
72315 atomic_t log_level; /* uint */
72316 - atomic_t bcast_seqno;
72317 + atomic_unchecked_t bcast_seqno;
72318 atomic_t bcast_queue_left;
72319 atomic_t batman_queue_left;
72320 atomic_t ttvn; /* translation table version number */
72321 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
72322 index 07d1c1d..7e9bea9 100644
72323 --- a/net/batman-adv/unicast.c
72324 +++ b/net/batman-adv/unicast.c
72325 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
72326 frag1->flags = UNI_FRAG_HEAD | large_tail;
72327 frag2->flags = large_tail;
72328
72329 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
72330 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
72331 frag1->seqno = htons(seqno - 1);
72332 frag2->seqno = htons(seqno);
72333
72334 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
72335 index c1c597e..05ebb40 100644
72336 --- a/net/bluetooth/hci_conn.c
72337 +++ b/net/bluetooth/hci_conn.c
72338 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
72339 memset(&cp, 0, sizeof(cp));
72340
72341 cp.handle = cpu_to_le16(conn->handle);
72342 - memcpy(cp.ltk, ltk, sizeof(ltk));
72343 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
72344
72345 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
72346 }
72347 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
72348 index 17b5b1c..826d872 100644
72349 --- a/net/bluetooth/l2cap_core.c
72350 +++ b/net/bluetooth/l2cap_core.c
72351 @@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
72352 break;
72353
72354 case L2CAP_CONF_RFC:
72355 - if (olen == sizeof(rfc))
72356 - memcpy(&rfc, (void *)val, olen);
72357 + if (olen != sizeof(rfc))
72358 + break;
72359 +
72360 + memcpy(&rfc, (void *)val, olen);
72361
72362 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
72363 rfc.mode != chan->mode)
72364 @@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
72365
72366 switch (type) {
72367 case L2CAP_CONF_RFC:
72368 - if (olen == sizeof(rfc))
72369 - memcpy(&rfc, (void *)val, olen);
72370 + if (olen != sizeof(rfc))
72371 + break;
72372 +
72373 + memcpy(&rfc, (void *)val, olen);
72374 goto done;
72375 }
72376 }
72377 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
72378 index a5f4e57..910ee6d 100644
72379 --- a/net/bridge/br_multicast.c
72380 +++ b/net/bridge/br_multicast.c
72381 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
72382 nexthdr = ip6h->nexthdr;
72383 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
72384
72385 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
72386 + if (nexthdr != IPPROTO_ICMPV6)
72387 return 0;
72388
72389 /* Okay, we found ICMPv6 header */
72390 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
72391 index 5864cc4..121f3a3 100644
72392 --- a/net/bridge/netfilter/ebtables.c
72393 +++ b/net/bridge/netfilter/ebtables.c
72394 @@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
72395 tmp.valid_hooks = t->table->valid_hooks;
72396 }
72397 mutex_unlock(&ebt_mutex);
72398 - if (copy_to_user(user, &tmp, *len) != 0){
72399 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
72400 BUGPRINT("c2u Didn't work\n");
72401 ret = -EFAULT;
72402 break;
72403 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
72404 index a986280..13444a1 100644
72405 --- a/net/caif/caif_socket.c
72406 +++ b/net/caif/caif_socket.c
72407 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
72408 #ifdef CONFIG_DEBUG_FS
72409 struct debug_fs_counter {
72410 atomic_t caif_nr_socks;
72411 - atomic_t caif_sock_create;
72412 - atomic_t num_connect_req;
72413 - atomic_t num_connect_resp;
72414 - atomic_t num_connect_fail_resp;
72415 - atomic_t num_disconnect;
72416 - atomic_t num_remote_shutdown_ind;
72417 - atomic_t num_tx_flow_off_ind;
72418 - atomic_t num_tx_flow_on_ind;
72419 - atomic_t num_rx_flow_off;
72420 - atomic_t num_rx_flow_on;
72421 + atomic_unchecked_t caif_sock_create;
72422 + atomic_unchecked_t num_connect_req;
72423 + atomic_unchecked_t num_connect_resp;
72424 + atomic_unchecked_t num_connect_fail_resp;
72425 + atomic_unchecked_t num_disconnect;
72426 + atomic_unchecked_t num_remote_shutdown_ind;
72427 + atomic_unchecked_t num_tx_flow_off_ind;
72428 + atomic_unchecked_t num_tx_flow_on_ind;
72429 + atomic_unchecked_t num_rx_flow_off;
72430 + atomic_unchecked_t num_rx_flow_on;
72431 };
72432 static struct debug_fs_counter cnt;
72433 #define dbfs_atomic_inc(v) atomic_inc_return(v)
72434 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
72435 #define dbfs_atomic_dec(v) atomic_dec_return(v)
72436 #else
72437 #define dbfs_atomic_inc(v) 0
72438 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72439 atomic_read(&cf_sk->sk.sk_rmem_alloc),
72440 sk_rcvbuf_lowwater(cf_sk));
72441 set_rx_flow_off(cf_sk);
72442 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
72443 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72444 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72445 }
72446
72447 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72448 set_rx_flow_off(cf_sk);
72449 if (net_ratelimit())
72450 pr_debug("sending flow OFF due to rmem_schedule\n");
72451 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
72452 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72453 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72454 }
72455 skb->dev = NULL;
72456 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
72457 switch (flow) {
72458 case CAIF_CTRLCMD_FLOW_ON_IND:
72459 /* OK from modem to start sending again */
72460 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
72461 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
72462 set_tx_flow_on(cf_sk);
72463 cf_sk->sk.sk_state_change(&cf_sk->sk);
72464 break;
72465
72466 case CAIF_CTRLCMD_FLOW_OFF_IND:
72467 /* Modem asks us to shut up */
72468 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
72469 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
72470 set_tx_flow_off(cf_sk);
72471 cf_sk->sk.sk_state_change(&cf_sk->sk);
72472 break;
72473 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72474 /* We're now connected */
72475 caif_client_register_refcnt(&cf_sk->layer,
72476 cfsk_hold, cfsk_put);
72477 - dbfs_atomic_inc(&cnt.num_connect_resp);
72478 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
72479 cf_sk->sk.sk_state = CAIF_CONNECTED;
72480 set_tx_flow_on(cf_sk);
72481 cf_sk->sk.sk_state_change(&cf_sk->sk);
72482 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72483
72484 case CAIF_CTRLCMD_INIT_FAIL_RSP:
72485 /* Connect request failed */
72486 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
72487 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
72488 cf_sk->sk.sk_err = ECONNREFUSED;
72489 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
72490 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72491 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72492
72493 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
72494 /* Modem has closed this connection, or device is down. */
72495 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
72496 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
72497 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72498 cf_sk->sk.sk_err = ECONNRESET;
72499 set_rx_flow_on(cf_sk);
72500 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
72501 return;
72502
72503 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
72504 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
72505 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
72506 set_rx_flow_on(cf_sk);
72507 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
72508 }
72509 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
72510 /*ifindex = id of the interface.*/
72511 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
72512
72513 - dbfs_atomic_inc(&cnt.num_connect_req);
72514 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
72515 cf_sk->layer.receive = caif_sktrecv_cb;
72516
72517 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
72518 @@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
72519 spin_unlock_bh(&sk->sk_receive_queue.lock);
72520 sock->sk = NULL;
72521
72522 - dbfs_atomic_inc(&cnt.num_disconnect);
72523 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
72524
72525 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
72526 if (cf_sk->debugfs_socket_dir != NULL)
72527 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
72528 cf_sk->conn_req.protocol = protocol;
72529 /* Increase the number of sockets created. */
72530 dbfs_atomic_inc(&cnt.caif_nr_socks);
72531 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
72532 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
72533 #ifdef CONFIG_DEBUG_FS
72534 if (!IS_ERR(debugfsdir)) {
72535
72536 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
72537 index 5cf5222..6f704ad 100644
72538 --- a/net/caif/cfctrl.c
72539 +++ b/net/caif/cfctrl.c
72540 @@ -9,6 +9,7 @@
72541 #include <linux/stddef.h>
72542 #include <linux/spinlock.h>
72543 #include <linux/slab.h>
72544 +#include <linux/sched.h>
72545 #include <net/caif/caif_layer.h>
72546 #include <net/caif/cfpkt.h>
72547 #include <net/caif/cfctrl.h>
72548 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
72549 memset(&dev_info, 0, sizeof(dev_info));
72550 dev_info.id = 0xff;
72551 cfsrvl_init(&this->serv, 0, &dev_info, false);
72552 - atomic_set(&this->req_seq_no, 1);
72553 - atomic_set(&this->rsp_seq_no, 1);
72554 + atomic_set_unchecked(&this->req_seq_no, 1);
72555 + atomic_set_unchecked(&this->rsp_seq_no, 1);
72556 this->serv.layer.receive = cfctrl_recv;
72557 sprintf(this->serv.layer.name, "ctrl");
72558 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
72559 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
72560 struct cfctrl_request_info *req)
72561 {
72562 spin_lock_bh(&ctrl->info_list_lock);
72563 - atomic_inc(&ctrl->req_seq_no);
72564 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
72565 + atomic_inc_unchecked(&ctrl->req_seq_no);
72566 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
72567 list_add_tail(&req->list, &ctrl->list);
72568 spin_unlock_bh(&ctrl->info_list_lock);
72569 }
72570 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
72571 if (p != first)
72572 pr_warn("Requests are not received in order\n");
72573
72574 - atomic_set(&ctrl->rsp_seq_no,
72575 + atomic_set_unchecked(&ctrl->rsp_seq_no,
72576 p->sequence_no);
72577 list_del(&p->list);
72578 goto out;
72579 diff --git a/net/can/gw.c b/net/can/gw.c
72580 index 3d79b12..8de85fa 100644
72581 --- a/net/can/gw.c
72582 +++ b/net/can/gw.c
72583 @@ -96,7 +96,7 @@ struct cf_mod {
72584 struct {
72585 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
72586 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
72587 - } csumfunc;
72588 + } __no_const csumfunc;
72589 };
72590
72591
72592 diff --git a/net/compat.c b/net/compat.c
72593 index 6def90e..c6992fa 100644
72594 --- a/net/compat.c
72595 +++ b/net/compat.c
72596 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
72597 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72598 __get_user(kmsg->msg_flags, &umsg->msg_flags))
72599 return -EFAULT;
72600 - kmsg->msg_name = compat_ptr(tmp1);
72601 - kmsg->msg_iov = compat_ptr(tmp2);
72602 - kmsg->msg_control = compat_ptr(tmp3);
72603 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
72604 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
72605 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
72606 return 0;
72607 }
72608
72609 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72610
72611 if (kern_msg->msg_namelen) {
72612 if (mode == VERIFY_READ) {
72613 - int err = move_addr_to_kernel(kern_msg->msg_name,
72614 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
72615 kern_msg->msg_namelen,
72616 kern_address);
72617 if (err < 0)
72618 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72619 kern_msg->msg_name = NULL;
72620
72621 tot_len = iov_from_user_compat_to_kern(kern_iov,
72622 - (struct compat_iovec __user *)kern_msg->msg_iov,
72623 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
72624 kern_msg->msg_iovlen);
72625 if (tot_len >= 0)
72626 kern_msg->msg_iov = kern_iov;
72627 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72628
72629 #define CMSG_COMPAT_FIRSTHDR(msg) \
72630 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
72631 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
72632 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
72633 (struct compat_cmsghdr __user *)NULL)
72634
72635 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
72636 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
72637 (ucmlen) <= (unsigned long) \
72638 ((mhdr)->msg_controllen - \
72639 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
72640 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
72641
72642 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
72643 struct compat_cmsghdr __user *cmsg, int cmsg_len)
72644 {
72645 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
72646 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
72647 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
72648 msg->msg_controllen)
72649 return NULL;
72650 return (struct compat_cmsghdr __user *)ptr;
72651 @@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72652 {
72653 struct compat_timeval ctv;
72654 struct compat_timespec cts[3];
72655 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72656 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72657 struct compat_cmsghdr cmhdr;
72658 int cmlen;
72659
72660 @@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72661
72662 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72663 {
72664 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72665 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72666 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72667 int fdnum = scm->fp->count;
72668 struct file **fp = scm->fp->fp;
72669 @@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
72670 return -EFAULT;
72671 old_fs = get_fs();
72672 set_fs(KERNEL_DS);
72673 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72674 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72675 set_fs(old_fs);
72676
72677 return err;
72678 @@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
72679 len = sizeof(ktime);
72680 old_fs = get_fs();
72681 set_fs(KERNEL_DS);
72682 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72683 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72684 set_fs(old_fs);
72685
72686 if (!err) {
72687 @@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72688 case MCAST_JOIN_GROUP:
72689 case MCAST_LEAVE_GROUP:
72690 {
72691 - struct compat_group_req __user *gr32 = (void *)optval;
72692 + struct compat_group_req __user *gr32 = (void __user *)optval;
72693 struct group_req __user *kgr =
72694 compat_alloc_user_space(sizeof(struct group_req));
72695 u32 interface;
72696 @@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72697 case MCAST_BLOCK_SOURCE:
72698 case MCAST_UNBLOCK_SOURCE:
72699 {
72700 - struct compat_group_source_req __user *gsr32 = (void *)optval;
72701 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72702 struct group_source_req __user *kgsr = compat_alloc_user_space(
72703 sizeof(struct group_source_req));
72704 u32 interface;
72705 @@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72706 }
72707 case MCAST_MSFILTER:
72708 {
72709 - struct compat_group_filter __user *gf32 = (void *)optval;
72710 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72711 struct group_filter __user *kgf;
72712 u32 interface, fmode, numsrc;
72713
72714 @@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
72715 char __user *optval, int __user *optlen,
72716 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
72717 {
72718 - struct compat_group_filter __user *gf32 = (void *)optval;
72719 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72720 struct group_filter __user *kgf;
72721 int __user *koptlen;
72722 u32 interface, fmode, numsrc;
72723 diff --git a/net/core/datagram.c b/net/core/datagram.c
72724 index 68bbf9f..5ef0d12 100644
72725 --- a/net/core/datagram.c
72726 +++ b/net/core/datagram.c
72727 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
72728 }
72729
72730 kfree_skb(skb);
72731 - atomic_inc(&sk->sk_drops);
72732 + atomic_inc_unchecked(&sk->sk_drops);
72733 sk_mem_reclaim_partial(sk);
72734
72735 return err;
72736 diff --git a/net/core/dev.c b/net/core/dev.c
72737 index 5a13edf..a6f2bd2 100644
72738 --- a/net/core/dev.c
72739 +++ b/net/core/dev.c
72740 @@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
72741 if (no_module && capable(CAP_NET_ADMIN))
72742 no_module = request_module("netdev-%s", name);
72743 if (no_module && capable(CAP_SYS_MODULE)) {
72744 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72745 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
72746 +#else
72747 if (!request_module("%s", name))
72748 pr_err("Loading kernel module for a network device "
72749 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
72750 "instead\n", name);
72751 +#endif
72752 }
72753 }
72754 EXPORT_SYMBOL(dev_load);
72755 @@ -1573,7 +1577,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72756 {
72757 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
72758 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
72759 - atomic_long_inc(&dev->rx_dropped);
72760 + atomic_long_inc_unchecked(&dev->rx_dropped);
72761 kfree_skb(skb);
72762 return NET_RX_DROP;
72763 }
72764 @@ -1583,7 +1587,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72765 nf_reset(skb);
72766
72767 if (unlikely(!is_skb_forwardable(dev, skb))) {
72768 - atomic_long_inc(&dev->rx_dropped);
72769 + atomic_long_inc_unchecked(&dev->rx_dropped);
72770 kfree_skb(skb);
72771 return NET_RX_DROP;
72772 }
72773 @@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
72774
72775 struct dev_gso_cb {
72776 void (*destructor)(struct sk_buff *skb);
72777 -};
72778 +} __no_const;
72779
72780 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72781
72782 @@ -2970,7 +2974,7 @@ enqueue:
72783
72784 local_irq_restore(flags);
72785
72786 - atomic_long_inc(&skb->dev->rx_dropped);
72787 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72788 kfree_skb(skb);
72789 return NET_RX_DROP;
72790 }
72791 @@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb)
72792 }
72793 EXPORT_SYMBOL(netif_rx_ni);
72794
72795 -static void net_tx_action(struct softirq_action *h)
72796 +static void net_tx_action(void)
72797 {
72798 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72799
72800 @@ -3333,7 +3337,7 @@ ncls:
72801 if (pt_prev) {
72802 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
72803 } else {
72804 - atomic_long_inc(&skb->dev->rx_dropped);
72805 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72806 kfree_skb(skb);
72807 /* Jamal, now you will not able to escape explaining
72808 * me how you were going to use this. :-)
72809 @@ -3891,7 +3895,7 @@ void netif_napi_del(struct napi_struct *napi)
72810 }
72811 EXPORT_SYMBOL(netif_napi_del);
72812
72813 -static void net_rx_action(struct softirq_action *h)
72814 +static void net_rx_action(void)
72815 {
72816 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72817 unsigned long time_limit = jiffies + 2;
72818 @@ -5949,7 +5953,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
72819 } else {
72820 netdev_stats_to_stats64(storage, &dev->stats);
72821 }
72822 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
72823 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
72824 return storage;
72825 }
72826 EXPORT_SYMBOL(dev_get_stats);
72827 diff --git a/net/core/flow.c b/net/core/flow.c
72828 index e318c7e..168b1d0 100644
72829 --- a/net/core/flow.c
72830 +++ b/net/core/flow.c
72831 @@ -61,7 +61,7 @@ struct flow_cache {
72832 struct timer_list rnd_timer;
72833 };
72834
72835 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
72836 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72837 EXPORT_SYMBOL(flow_cache_genid);
72838 static struct flow_cache flow_cache_global;
72839 static struct kmem_cache *flow_cachep __read_mostly;
72840 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
72841
72842 static int flow_entry_valid(struct flow_cache_entry *fle)
72843 {
72844 - if (atomic_read(&flow_cache_genid) != fle->genid)
72845 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72846 return 0;
72847 if (fle->object && !fle->object->ops->check(fle->object))
72848 return 0;
72849 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
72850 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72851 fcp->hash_count++;
72852 }
72853 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72854 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72855 flo = fle->object;
72856 if (!flo)
72857 goto ret_object;
72858 @@ -280,7 +280,7 @@ nocache:
72859 }
72860 flo = resolver(net, key, family, dir, flo, ctx);
72861 if (fle) {
72862 - fle->genid = atomic_read(&flow_cache_genid);
72863 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
72864 if (!IS_ERR(flo))
72865 fle->object = flo;
72866 else
72867 diff --git a/net/core/iovec.c b/net/core/iovec.c
72868 index c40f27e..7f49254 100644
72869 --- a/net/core/iovec.c
72870 +++ b/net/core/iovec.c
72871 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72872 if (m->msg_namelen) {
72873 if (mode == VERIFY_READ) {
72874 void __user *namep;
72875 - namep = (void __user __force *) m->msg_name;
72876 + namep = (void __force_user *) m->msg_name;
72877 err = move_addr_to_kernel(namep, m->msg_namelen,
72878 address);
72879 if (err < 0)
72880 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72881 }
72882
72883 size = m->msg_iovlen * sizeof(struct iovec);
72884 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72885 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72886 return -EFAULT;
72887
72888 m->msg_iov = iov;
72889 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
72890 index 9083e82..1673203 100644
72891 --- a/net/core/rtnetlink.c
72892 +++ b/net/core/rtnetlink.c
72893 @@ -57,7 +57,7 @@ struct rtnl_link {
72894 rtnl_doit_func doit;
72895 rtnl_dumpit_func dumpit;
72896 rtnl_calcit_func calcit;
72897 -};
72898 +} __no_const;
72899
72900 static DEFINE_MUTEX(rtnl_mutex);
72901 static u16 min_ifinfo_dump_size;
72902 diff --git a/net/core/scm.c b/net/core/scm.c
72903 index ff52ad0..aff1c0f 100644
72904 --- a/net/core/scm.c
72905 +++ b/net/core/scm.c
72906 @@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
72907 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72908 {
72909 struct cmsghdr __user *cm
72910 - = (__force struct cmsghdr __user *)msg->msg_control;
72911 + = (struct cmsghdr __force_user *)msg->msg_control;
72912 struct cmsghdr cmhdr;
72913 int cmlen = CMSG_LEN(len);
72914 int err;
72915 @@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72916 err = -EFAULT;
72917 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72918 goto out;
72919 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72920 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72921 goto out;
72922 cmlen = CMSG_SPACE(len);
72923 if (msg->msg_controllen < cmlen)
72924 @@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
72925 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72926 {
72927 struct cmsghdr __user *cm
72928 - = (__force struct cmsghdr __user*)msg->msg_control;
72929 + = (struct cmsghdr __force_user *)msg->msg_control;
72930
72931 int fdmax = 0;
72932 int fdnum = scm->fp->count;
72933 @@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72934 if (fdnum < fdmax)
72935 fdmax = fdnum;
72936
72937 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72938 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72939 i++, cmfptr++)
72940 {
72941 int new_fd;
72942 diff --git a/net/core/sock.c b/net/core/sock.c
72943 index b23f174..b9a0d26 100644
72944 --- a/net/core/sock.c
72945 +++ b/net/core/sock.c
72946 @@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72947 struct sk_buff_head *list = &sk->sk_receive_queue;
72948
72949 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
72950 - atomic_inc(&sk->sk_drops);
72951 + atomic_inc_unchecked(&sk->sk_drops);
72952 trace_sock_rcvqueue_full(sk, skb);
72953 return -ENOMEM;
72954 }
72955 @@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72956 return err;
72957
72958 if (!sk_rmem_schedule(sk, skb->truesize)) {
72959 - atomic_inc(&sk->sk_drops);
72960 + atomic_inc_unchecked(&sk->sk_drops);
72961 return -ENOBUFS;
72962 }
72963
72964 @@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72965 skb_dst_force(skb);
72966
72967 spin_lock_irqsave(&list->lock, flags);
72968 - skb->dropcount = atomic_read(&sk->sk_drops);
72969 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72970 __skb_queue_tail(list, skb);
72971 spin_unlock_irqrestore(&list->lock, flags);
72972
72973 @@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72974 skb->dev = NULL;
72975
72976 if (sk_rcvqueues_full(sk, skb)) {
72977 - atomic_inc(&sk->sk_drops);
72978 + atomic_inc_unchecked(&sk->sk_drops);
72979 goto discard_and_relse;
72980 }
72981 if (nested)
72982 @@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72983 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72984 } else if (sk_add_backlog(sk, skb)) {
72985 bh_unlock_sock(sk);
72986 - atomic_inc(&sk->sk_drops);
72987 + atomic_inc_unchecked(&sk->sk_drops);
72988 goto discard_and_relse;
72989 }
72990
72991 @@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72992 if (len > sizeof(peercred))
72993 len = sizeof(peercred);
72994 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72995 - if (copy_to_user(optval, &peercred, len))
72996 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72997 return -EFAULT;
72998 goto lenout;
72999 }
73000 @@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
73001 return -ENOTCONN;
73002 if (lv < len)
73003 return -EINVAL;
73004 - if (copy_to_user(optval, address, len))
73005 + if (len > sizeof(address) || copy_to_user(optval, address, len))
73006 return -EFAULT;
73007 goto lenout;
73008 }
73009 @@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
73010
73011 if (len > lv)
73012 len = lv;
73013 - if (copy_to_user(optval, &v, len))
73014 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
73015 return -EFAULT;
73016 lenout:
73017 if (put_user(len, optlen))
73018 @@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
73019 */
73020 smp_wmb();
73021 atomic_set(&sk->sk_refcnt, 1);
73022 - atomic_set(&sk->sk_drops, 0);
73023 + atomic_set_unchecked(&sk->sk_drops, 0);
73024 }
73025 EXPORT_SYMBOL(sock_init_data);
73026
73027 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
73028 index 02e75d1..9a57a7c 100644
73029 --- a/net/decnet/sysctl_net_decnet.c
73030 +++ b/net/decnet/sysctl_net_decnet.c
73031 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
73032
73033 if (len > *lenp) len = *lenp;
73034
73035 - if (copy_to_user(buffer, addr, len))
73036 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
73037 return -EFAULT;
73038
73039 *lenp = len;
73040 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
73041
73042 if (len > *lenp) len = *lenp;
73043
73044 - if (copy_to_user(buffer, devname, len))
73045 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
73046 return -EFAULT;
73047
73048 *lenp = len;
73049 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
73050 index 39a2d29..f39c0fe 100644
73051 --- a/net/econet/Kconfig
73052 +++ b/net/econet/Kconfig
73053 @@ -4,7 +4,7 @@
73054
73055 config ECONET
73056 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
73057 - depends on EXPERIMENTAL && INET
73058 + depends on EXPERIMENTAL && INET && BROKEN
73059 ---help---
73060 Econet is a fairly old and slow networking protocol mainly used by
73061 Acorn computers to access file and print servers. It uses native
73062 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
73063 index 92fc5f6..b790d91 100644
73064 --- a/net/ipv4/fib_frontend.c
73065 +++ b/net/ipv4/fib_frontend.c
73066 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
73067 #ifdef CONFIG_IP_ROUTE_MULTIPATH
73068 fib_sync_up(dev);
73069 #endif
73070 - atomic_inc(&net->ipv4.dev_addr_genid);
73071 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73072 rt_cache_flush(dev_net(dev), -1);
73073 break;
73074 case NETDEV_DOWN:
73075 fib_del_ifaddr(ifa, NULL);
73076 - atomic_inc(&net->ipv4.dev_addr_genid);
73077 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73078 if (ifa->ifa_dev->ifa_list == NULL) {
73079 /* Last address was deleted from this interface.
73080 * Disable IP.
73081 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
73082 #ifdef CONFIG_IP_ROUTE_MULTIPATH
73083 fib_sync_up(dev);
73084 #endif
73085 - atomic_inc(&net->ipv4.dev_addr_genid);
73086 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73087 rt_cache_flush(dev_net(dev), -1);
73088 break;
73089 case NETDEV_DOWN:
73090 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
73091 index 80106d8..232e898 100644
73092 --- a/net/ipv4/fib_semantics.c
73093 +++ b/net/ipv4/fib_semantics.c
73094 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
73095 nh->nh_saddr = inet_select_addr(nh->nh_dev,
73096 nh->nh_gw,
73097 nh->nh_parent->fib_scope);
73098 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
73099 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
73100
73101 return nh->nh_saddr;
73102 }
73103 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
73104 index ccee270..db23c3c 100644
73105 --- a/net/ipv4/inet_diag.c
73106 +++ b/net/ipv4/inet_diag.c
73107 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
73108 r->idiag_retrans = 0;
73109
73110 r->id.idiag_if = sk->sk_bound_dev_if;
73111 +
73112 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73113 + r->id.idiag_cookie[0] = 0;
73114 + r->id.idiag_cookie[1] = 0;
73115 +#else
73116 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
73117 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
73118 +#endif
73119
73120 r->id.idiag_sport = inet->inet_sport;
73121 r->id.idiag_dport = inet->inet_dport;
73122 @@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
73123 r->idiag_family = tw->tw_family;
73124 r->idiag_retrans = 0;
73125 r->id.idiag_if = tw->tw_bound_dev_if;
73126 +
73127 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73128 + r->id.idiag_cookie[0] = 0;
73129 + r->id.idiag_cookie[1] = 0;
73130 +#else
73131 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
73132 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
73133 +#endif
73134 +
73135 r->id.idiag_sport = tw->tw_sport;
73136 r->id.idiag_dport = tw->tw_dport;
73137 r->id.idiag_src[0] = tw->tw_rcv_saddr;
73138 @@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
73139 if (sk == NULL)
73140 goto unlock;
73141
73142 +#ifndef CONFIG_GRKERNSEC_HIDESYM
73143 err = -ESTALE;
73144 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
73145 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
73146 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
73147 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
73148 goto out;
73149 +#endif
73150
73151 err = -ENOMEM;
73152 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
73153 @@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
73154 r->idiag_retrans = req->retrans;
73155
73156 r->id.idiag_if = sk->sk_bound_dev_if;
73157 +
73158 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73159 + r->id.idiag_cookie[0] = 0;
73160 + r->id.idiag_cookie[1] = 0;
73161 +#else
73162 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
73163 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
73164 +#endif
73165
73166 tmo = req->expires - jiffies;
73167 if (tmo < 0)
73168 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
73169 index 984ec65..97ac518 100644
73170 --- a/net/ipv4/inet_hashtables.c
73171 +++ b/net/ipv4/inet_hashtables.c
73172 @@ -18,12 +18,15 @@
73173 #include <linux/sched.h>
73174 #include <linux/slab.h>
73175 #include <linux/wait.h>
73176 +#include <linux/security.h>
73177
73178 #include <net/inet_connection_sock.h>
73179 #include <net/inet_hashtables.h>
73180 #include <net/secure_seq.h>
73181 #include <net/ip.h>
73182
73183 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
73184 +
73185 /*
73186 * Allocate and initialize a new local port bind bucket.
73187 * The bindhash mutex for snum's hash chain must be held here.
73188 @@ -530,6 +533,8 @@ ok:
73189 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
73190 spin_unlock(&head->lock);
73191
73192 + gr_update_task_in_ip_table(current, inet_sk(sk));
73193 +
73194 if (tw) {
73195 inet_twsk_deschedule(tw, death_row);
73196 while (twrefcnt) {
73197 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
73198 index 86f13c67..59a35b5 100644
73199 --- a/net/ipv4/inetpeer.c
73200 +++ b/net/ipv4/inetpeer.c
73201 @@ -436,8 +436,8 @@ relookup:
73202 if (p) {
73203 p->daddr = *daddr;
73204 atomic_set(&p->refcnt, 1);
73205 - atomic_set(&p->rid, 0);
73206 - atomic_set(&p->ip_id_count,
73207 + atomic_set_unchecked(&p->rid, 0);
73208 + atomic_set_unchecked(&p->ip_id_count,
73209 (daddr->family == AF_INET) ?
73210 secure_ip_id(daddr->addr.a4) :
73211 secure_ipv6_id(daddr->addr.a6));
73212 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
73213 index fdaabf2..0ec3205 100644
73214 --- a/net/ipv4/ip_fragment.c
73215 +++ b/net/ipv4/ip_fragment.c
73216 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
73217 return 0;
73218
73219 start = qp->rid;
73220 - end = atomic_inc_return(&peer->rid);
73221 + end = atomic_inc_return_unchecked(&peer->rid);
73222 qp->rid = end;
73223
73224 rc = qp->q.fragments && (end - start) > max;
73225 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
73226 index 09ff51b..d3968eb 100644
73227 --- a/net/ipv4/ip_sockglue.c
73228 +++ b/net/ipv4/ip_sockglue.c
73229 @@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
73230 len = min_t(unsigned int, len, opt->optlen);
73231 if (put_user(len, optlen))
73232 return -EFAULT;
73233 - if (copy_to_user(optval, opt->__data, len))
73234 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
73235 + copy_to_user(optval, opt->__data, len))
73236 return -EFAULT;
73237 return 0;
73238 }
73239 @@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
73240 if (sk->sk_type != SOCK_STREAM)
73241 return -ENOPROTOOPT;
73242
73243 - msg.msg_control = optval;
73244 + msg.msg_control = (void __force_kernel *)optval;
73245 msg.msg_controllen = len;
73246 msg.msg_flags = flags;
73247
73248 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
73249 index 99ec116..c5628fe 100644
73250 --- a/net/ipv4/ipconfig.c
73251 +++ b/net/ipv4/ipconfig.c
73252 @@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
73253
73254 mm_segment_t oldfs = get_fs();
73255 set_fs(get_ds());
73256 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
73257 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
73258 set_fs(oldfs);
73259 return res;
73260 }
73261 @@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
73262
73263 mm_segment_t oldfs = get_fs();
73264 set_fs(get_ds());
73265 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
73266 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
73267 set_fs(oldfs);
73268 return res;
73269 }
73270 @@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
73271
73272 mm_segment_t oldfs = get_fs();
73273 set_fs(get_ds());
73274 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
73275 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
73276 set_fs(oldfs);
73277 return res;
73278 }
73279 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
73280 index 2133c30..5c4b40b 100644
73281 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
73282 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
73283 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
73284
73285 *len = 0;
73286
73287 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
73288 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
73289 if (*octets == NULL)
73290 return 0;
73291
73292 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
73293 index 43d4c3b..1914409 100644
73294 --- a/net/ipv4/ping.c
73295 +++ b/net/ipv4/ping.c
73296 @@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
73297 sk_rmem_alloc_get(sp),
73298 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73299 atomic_read(&sp->sk_refcnt), sp,
73300 - atomic_read(&sp->sk_drops), len);
73301 + atomic_read_unchecked(&sp->sk_drops), len);
73302 }
73303
73304 static int ping_seq_show(struct seq_file *seq, void *v)
73305 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
73306 index 007e2eb..85a18a0 100644
73307 --- a/net/ipv4/raw.c
73308 +++ b/net/ipv4/raw.c
73309 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
73310 int raw_rcv(struct sock *sk, struct sk_buff *skb)
73311 {
73312 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
73313 - atomic_inc(&sk->sk_drops);
73314 + atomic_inc_unchecked(&sk->sk_drops);
73315 kfree_skb(skb);
73316 return NET_RX_DROP;
73317 }
73318 @@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
73319
73320 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
73321 {
73322 + struct icmp_filter filter;
73323 +
73324 if (optlen > sizeof(struct icmp_filter))
73325 optlen = sizeof(struct icmp_filter);
73326 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
73327 + if (copy_from_user(&filter, optval, optlen))
73328 return -EFAULT;
73329 + raw_sk(sk)->filter = filter;
73330 return 0;
73331 }
73332
73333 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
73334 {
73335 int len, ret = -EFAULT;
73336 + struct icmp_filter filter;
73337
73338 if (get_user(len, optlen))
73339 goto out;
73340 @@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
73341 if (len > sizeof(struct icmp_filter))
73342 len = sizeof(struct icmp_filter);
73343 ret = -EFAULT;
73344 - if (put_user(len, optlen) ||
73345 - copy_to_user(optval, &raw_sk(sk)->filter, len))
73346 + filter = raw_sk(sk)->filter;
73347 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
73348 goto out;
73349 ret = 0;
73350 out: return ret;
73351 @@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73352 sk_wmem_alloc_get(sp),
73353 sk_rmem_alloc_get(sp),
73354 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73355 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73356 + atomic_read(&sp->sk_refcnt),
73357 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73358 + NULL,
73359 +#else
73360 + sp,
73361 +#endif
73362 + atomic_read_unchecked(&sp->sk_drops));
73363 }
73364
73365 static int raw_seq_show(struct seq_file *seq, void *v)
73366 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
73367 index 94cdbc5..0cb0063 100644
73368 --- a/net/ipv4/route.c
73369 +++ b/net/ipv4/route.c
73370 @@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
73371
73372 static inline int rt_genid(struct net *net)
73373 {
73374 - return atomic_read(&net->ipv4.rt_genid);
73375 + return atomic_read_unchecked(&net->ipv4.rt_genid);
73376 }
73377
73378 #ifdef CONFIG_PROC_FS
73379 @@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
73380 unsigned char shuffle;
73381
73382 get_random_bytes(&shuffle, sizeof(shuffle));
73383 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
73384 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
73385 redirect_genid++;
73386 }
73387
73388 @@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net,
73389 error = rt->dst.error;
73390 if (peer) {
73391 inet_peer_refcheck(rt->peer);
73392 - id = atomic_read(&peer->ip_id_count) & 0xffff;
73393 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
73394 if (peer->tcp_ts_stamp) {
73395 ts = peer->tcp_ts;
73396 tsage = get_seconds() - peer->tcp_ts_stamp;
73397 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
73398 index c89e354..8bd55c8 100644
73399 --- a/net/ipv4/tcp_ipv4.c
73400 +++ b/net/ipv4/tcp_ipv4.c
73401 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
73402 int sysctl_tcp_low_latency __read_mostly;
73403 EXPORT_SYMBOL(sysctl_tcp_low_latency);
73404
73405 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73406 +extern int grsec_enable_blackhole;
73407 +#endif
73408
73409 #ifdef CONFIG_TCP_MD5SIG
73410 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
73411 @@ -1627,6 +1630,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
73412 return 0;
73413
73414 reset:
73415 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73416 + if (!grsec_enable_blackhole)
73417 +#endif
73418 tcp_v4_send_reset(rsk, skb);
73419 discard:
73420 kfree_skb(skb);
73421 @@ -1689,12 +1695,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
73422 TCP_SKB_CB(skb)->sacked = 0;
73423
73424 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73425 - if (!sk)
73426 + if (!sk) {
73427 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73428 + ret = 1;
73429 +#endif
73430 goto no_tcp_socket;
73431 -
73432 + }
73433 process:
73434 - if (sk->sk_state == TCP_TIME_WAIT)
73435 + if (sk->sk_state == TCP_TIME_WAIT) {
73436 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73437 + ret = 2;
73438 +#endif
73439 goto do_time_wait;
73440 + }
73441
73442 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
73443 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73444 @@ -1744,6 +1757,10 @@ no_tcp_socket:
73445 bad_packet:
73446 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73447 } else {
73448 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73449 + if (!grsec_enable_blackhole || (ret == 1 &&
73450 + (skb->dev->flags & IFF_LOOPBACK)))
73451 +#endif
73452 tcp_v4_send_reset(NULL, skb);
73453 }
73454
73455 @@ -2404,7 +2421,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
73456 0, /* non standard timer */
73457 0, /* open_requests have no inode */
73458 atomic_read(&sk->sk_refcnt),
73459 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73460 + NULL,
73461 +#else
73462 req,
73463 +#endif
73464 len);
73465 }
73466
73467 @@ -2454,7 +2475,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
73468 sock_i_uid(sk),
73469 icsk->icsk_probes_out,
73470 sock_i_ino(sk),
73471 - atomic_read(&sk->sk_refcnt), sk,
73472 + atomic_read(&sk->sk_refcnt),
73473 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73474 + NULL,
73475 +#else
73476 + sk,
73477 +#endif
73478 jiffies_to_clock_t(icsk->icsk_rto),
73479 jiffies_to_clock_t(icsk->icsk_ack.ato),
73480 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
73481 @@ -2482,7 +2508,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
73482 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
73483 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
73484 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73485 - atomic_read(&tw->tw_refcnt), tw, len);
73486 + atomic_read(&tw->tw_refcnt),
73487 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73488 + NULL,
73489 +#else
73490 + tw,
73491 +#endif
73492 + len);
73493 }
73494
73495 #define TMPSZ 150
73496 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
73497 index 66363b6..b0654a3 100644
73498 --- a/net/ipv4/tcp_minisocks.c
73499 +++ b/net/ipv4/tcp_minisocks.c
73500 @@ -27,6 +27,10 @@
73501 #include <net/inet_common.h>
73502 #include <net/xfrm.h>
73503
73504 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73505 +extern int grsec_enable_blackhole;
73506 +#endif
73507 +
73508 int sysctl_tcp_syncookies __read_mostly = 1;
73509 EXPORT_SYMBOL(sysctl_tcp_syncookies);
73510
73511 @@ -751,6 +755,10 @@ listen_overflow:
73512
73513 embryonic_reset:
73514 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
73515 +
73516 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73517 + if (!grsec_enable_blackhole)
73518 +#endif
73519 if (!(flg & TCP_FLAG_RST))
73520 req->rsk_ops->send_reset(sk, skb);
73521
73522 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
73523 index 85ee7eb..53277ab 100644
73524 --- a/net/ipv4/tcp_probe.c
73525 +++ b/net/ipv4/tcp_probe.c
73526 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
73527 if (cnt + width >= len)
73528 break;
73529
73530 - if (copy_to_user(buf + cnt, tbuf, width))
73531 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
73532 return -EFAULT;
73533 cnt += width;
73534 }
73535 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
73536 index 2e0f0af..e2948bf 100644
73537 --- a/net/ipv4/tcp_timer.c
73538 +++ b/net/ipv4/tcp_timer.c
73539 @@ -22,6 +22,10 @@
73540 #include <linux/gfp.h>
73541 #include <net/tcp.h>
73542
73543 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73544 +extern int grsec_lastack_retries;
73545 +#endif
73546 +
73547 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
73548 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
73549 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
73550 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
73551 }
73552 }
73553
73554 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73555 + if ((sk->sk_state == TCP_LAST_ACK) &&
73556 + (grsec_lastack_retries > 0) &&
73557 + (grsec_lastack_retries < retry_until))
73558 + retry_until = grsec_lastack_retries;
73559 +#endif
73560 +
73561 if (retransmits_timed_out(sk, retry_until,
73562 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
73563 /* Has it gone just too far? */
73564 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
73565 index 5a65eea..bd913a1 100644
73566 --- a/net/ipv4/udp.c
73567 +++ b/net/ipv4/udp.c
73568 @@ -86,6 +86,7 @@
73569 #include <linux/types.h>
73570 #include <linux/fcntl.h>
73571 #include <linux/module.h>
73572 +#include <linux/security.h>
73573 #include <linux/socket.h>
73574 #include <linux/sockios.h>
73575 #include <linux/igmp.h>
73576 @@ -108,6 +109,10 @@
73577 #include <trace/events/udp.h>
73578 #include "udp_impl.h"
73579
73580 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73581 +extern int grsec_enable_blackhole;
73582 +#endif
73583 +
73584 struct udp_table udp_table __read_mostly;
73585 EXPORT_SYMBOL(udp_table);
73586
73587 @@ -565,6 +570,9 @@ found:
73588 return s;
73589 }
73590
73591 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
73592 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
73593 +
73594 /*
73595 * This routine is called by the ICMP module when it gets some
73596 * sort of error condition. If err < 0 then the socket should
73597 @@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
73598 dport = usin->sin_port;
73599 if (dport == 0)
73600 return -EINVAL;
73601 +
73602 + err = gr_search_udp_sendmsg(sk, usin);
73603 + if (err)
73604 + return err;
73605 } else {
73606 if (sk->sk_state != TCP_ESTABLISHED)
73607 return -EDESTADDRREQ;
73608 +
73609 + err = gr_search_udp_sendmsg(sk, NULL);
73610 + if (err)
73611 + return err;
73612 +
73613 daddr = inet->inet_daddr;
73614 dport = inet->inet_dport;
73615 /* Open fast path for connected socket.
73616 @@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
73617 udp_lib_checksum_complete(skb)) {
73618 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73619 IS_UDPLITE(sk));
73620 - atomic_inc(&sk->sk_drops);
73621 + atomic_inc_unchecked(&sk->sk_drops);
73622 __skb_unlink(skb, rcvq);
73623 __skb_queue_tail(&list_kill, skb);
73624 }
73625 @@ -1185,6 +1202,10 @@ try_again:
73626 if (!skb)
73627 goto out;
73628
73629 + err = gr_search_udp_recvmsg(sk, skb);
73630 + if (err)
73631 + goto out_free;
73632 +
73633 ulen = skb->len - sizeof(struct udphdr);
73634 copied = len;
73635 if (copied > ulen)
73636 @@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73637
73638 drop:
73639 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73640 - atomic_inc(&sk->sk_drops);
73641 + atomic_inc_unchecked(&sk->sk_drops);
73642 kfree_skb(skb);
73643 return -1;
73644 }
73645 @@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73646 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
73647
73648 if (!skb1) {
73649 - atomic_inc(&sk->sk_drops);
73650 + atomic_inc_unchecked(&sk->sk_drops);
73651 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
73652 IS_UDPLITE(sk));
73653 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73654 @@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73655 goto csum_error;
73656
73657 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
73658 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73659 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73660 +#endif
73661 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
73662
73663 /*
73664 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
73665 sk_wmem_alloc_get(sp),
73666 sk_rmem_alloc_get(sp),
73667 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73668 - atomic_read(&sp->sk_refcnt), sp,
73669 - atomic_read(&sp->sk_drops), len);
73670 + atomic_read(&sp->sk_refcnt),
73671 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73672 + NULL,
73673 +#else
73674 + sp,
73675 +#endif
73676 + atomic_read_unchecked(&sp->sk_drops), len);
73677 }
73678
73679 int udp4_seq_show(struct seq_file *seq, void *v)
73680 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
73681 index 836c4ea..cbb74dc 100644
73682 --- a/net/ipv6/addrconf.c
73683 +++ b/net/ipv6/addrconf.c
73684 @@ -2149,7 +2149,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
73685 p.iph.ihl = 5;
73686 p.iph.protocol = IPPROTO_IPV6;
73687 p.iph.ttl = 64;
73688 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73689 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73690
73691 if (ops->ndo_do_ioctl) {
73692 mm_segment_t oldfs = get_fs();
73693 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
73694 index 1567fb1..29af910 100644
73695 --- a/net/ipv6/inet6_connection_sock.c
73696 +++ b/net/ipv6/inet6_connection_sock.c
73697 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
73698 #ifdef CONFIG_XFRM
73699 {
73700 struct rt6_info *rt = (struct rt6_info *)dst;
73701 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73702 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73703 }
73704 #endif
73705 }
73706 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
73707 #ifdef CONFIG_XFRM
73708 if (dst) {
73709 struct rt6_info *rt = (struct rt6_info *)dst;
73710 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73711 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73712 __sk_dst_reset(sk);
73713 dst = NULL;
73714 }
73715 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
73716 index 26cb08c..8af9877 100644
73717 --- a/net/ipv6/ipv6_sockglue.c
73718 +++ b/net/ipv6/ipv6_sockglue.c
73719 @@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
73720 if (sk->sk_type != SOCK_STREAM)
73721 return -ENOPROTOOPT;
73722
73723 - msg.msg_control = optval;
73724 + msg.msg_control = (void __force_kernel *)optval;
73725 msg.msg_controllen = len;
73726 msg.msg_flags = flags;
73727
73728 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
73729 index 361ebf3..d5628fb 100644
73730 --- a/net/ipv6/raw.c
73731 +++ b/net/ipv6/raw.c
73732 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
73733 {
73734 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
73735 skb_checksum_complete(skb)) {
73736 - atomic_inc(&sk->sk_drops);
73737 + atomic_inc_unchecked(&sk->sk_drops);
73738 kfree_skb(skb);
73739 return NET_RX_DROP;
73740 }
73741 @@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73742 struct raw6_sock *rp = raw6_sk(sk);
73743
73744 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73745 - atomic_inc(&sk->sk_drops);
73746 + atomic_inc_unchecked(&sk->sk_drops);
73747 kfree_skb(skb);
73748 return NET_RX_DROP;
73749 }
73750 @@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73751
73752 if (inet->hdrincl) {
73753 if (skb_checksum_complete(skb)) {
73754 - atomic_inc(&sk->sk_drops);
73755 + atomic_inc_unchecked(&sk->sk_drops);
73756 kfree_skb(skb);
73757 return NET_RX_DROP;
73758 }
73759 @@ -601,7 +601,7 @@ out:
73760 return err;
73761 }
73762
73763 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73764 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73765 struct flowi6 *fl6, struct dst_entry **dstp,
73766 unsigned int flags)
73767 {
73768 @@ -909,12 +909,15 @@ do_confirm:
73769 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73770 char __user *optval, int optlen)
73771 {
73772 + struct icmp6_filter filter;
73773 +
73774 switch (optname) {
73775 case ICMPV6_FILTER:
73776 if (optlen > sizeof(struct icmp6_filter))
73777 optlen = sizeof(struct icmp6_filter);
73778 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73779 + if (copy_from_user(&filter, optval, optlen))
73780 return -EFAULT;
73781 + raw6_sk(sk)->filter = filter;
73782 return 0;
73783 default:
73784 return -ENOPROTOOPT;
73785 @@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73786 char __user *optval, int __user *optlen)
73787 {
73788 int len;
73789 + struct icmp6_filter filter;
73790
73791 switch (optname) {
73792 case ICMPV6_FILTER:
73793 @@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73794 len = sizeof(struct icmp6_filter);
73795 if (put_user(len, optlen))
73796 return -EFAULT;
73797 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73798 + filter = raw6_sk(sk)->filter;
73799 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
73800 return -EFAULT;
73801 return 0;
73802 default:
73803 @@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73804 0, 0L, 0,
73805 sock_i_uid(sp), 0,
73806 sock_i_ino(sp),
73807 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73808 + atomic_read(&sp->sk_refcnt),
73809 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73810 + NULL,
73811 +#else
73812 + sp,
73813 +#endif
73814 + atomic_read_unchecked(&sp->sk_drops));
73815 }
73816
73817 static int raw6_seq_show(struct seq_file *seq, void *v)
73818 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
73819 index b859e4a..f9d1589 100644
73820 --- a/net/ipv6/tcp_ipv6.c
73821 +++ b/net/ipv6/tcp_ipv6.c
73822 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
73823 }
73824 #endif
73825
73826 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73827 +extern int grsec_enable_blackhole;
73828 +#endif
73829 +
73830 static void tcp_v6_hash(struct sock *sk)
73831 {
73832 if (sk->sk_state != TCP_CLOSE) {
73833 @@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
73834 return 0;
73835
73836 reset:
73837 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73838 + if (!grsec_enable_blackhole)
73839 +#endif
73840 tcp_v6_send_reset(sk, skb);
73841 discard:
73842 if (opt_skb)
73843 @@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
73844 TCP_SKB_CB(skb)->sacked = 0;
73845
73846 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73847 - if (!sk)
73848 + if (!sk) {
73849 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73850 + ret = 1;
73851 +#endif
73852 goto no_tcp_socket;
73853 + }
73854
73855 process:
73856 - if (sk->sk_state == TCP_TIME_WAIT)
73857 + if (sk->sk_state == TCP_TIME_WAIT) {
73858 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73859 + ret = 2;
73860 +#endif
73861 goto do_time_wait;
73862 + }
73863
73864 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73865 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73866 @@ -1783,6 +1798,10 @@ no_tcp_socket:
73867 bad_packet:
73868 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73869 } else {
73870 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73871 + if (!grsec_enable_blackhole || (ret == 1 &&
73872 + (skb->dev->flags & IFF_LOOPBACK)))
73873 +#endif
73874 tcp_v6_send_reset(NULL, skb);
73875 }
73876
73877 @@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
73878 uid,
73879 0, /* non standard timer */
73880 0, /* open_requests have no inode */
73881 - 0, req);
73882 + 0,
73883 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73884 + NULL
73885 +#else
73886 + req
73887 +#endif
73888 + );
73889 }
73890
73891 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73892 @@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73893 sock_i_uid(sp),
73894 icsk->icsk_probes_out,
73895 sock_i_ino(sp),
73896 - atomic_read(&sp->sk_refcnt), sp,
73897 + atomic_read(&sp->sk_refcnt),
73898 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73899 + NULL,
73900 +#else
73901 + sp,
73902 +#endif
73903 jiffies_to_clock_t(icsk->icsk_rto),
73904 jiffies_to_clock_t(icsk->icsk_ack.ato),
73905 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73906 @@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
73907 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73908 tw->tw_substate, 0, 0,
73909 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73910 - atomic_read(&tw->tw_refcnt), tw);
73911 + atomic_read(&tw->tw_refcnt),
73912 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73913 + NULL
73914 +#else
73915 + tw
73916 +#endif
73917 + );
73918 }
73919
73920 static int tcp6_seq_show(struct seq_file *seq, void *v)
73921 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
73922 index 8c25419..47a51ae 100644
73923 --- a/net/ipv6/udp.c
73924 +++ b/net/ipv6/udp.c
73925 @@ -50,6 +50,10 @@
73926 #include <linux/seq_file.h>
73927 #include "udp_impl.h"
73928
73929 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73930 +extern int grsec_enable_blackhole;
73931 +#endif
73932 +
73933 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73934 {
73935 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73936 @@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
73937
73938 return 0;
73939 drop:
73940 - atomic_inc(&sk->sk_drops);
73941 + atomic_inc_unchecked(&sk->sk_drops);
73942 drop_no_sk_drops_inc:
73943 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73944 kfree_skb(skb);
73945 @@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73946 continue;
73947 }
73948 drop:
73949 - atomic_inc(&sk->sk_drops);
73950 + atomic_inc_unchecked(&sk->sk_drops);
73951 UDP6_INC_STATS_BH(sock_net(sk),
73952 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73953 UDP6_INC_STATS_BH(sock_net(sk),
73954 @@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73955 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73956 proto == IPPROTO_UDPLITE);
73957
73958 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73959 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73960 +#endif
73961 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73962
73963 kfree_skb(skb);
73964 @@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73965 if (!sock_owned_by_user(sk))
73966 udpv6_queue_rcv_skb(sk, skb);
73967 else if (sk_add_backlog(sk, skb)) {
73968 - atomic_inc(&sk->sk_drops);
73969 + atomic_inc_unchecked(&sk->sk_drops);
73970 bh_unlock_sock(sk);
73971 sock_put(sk);
73972 goto discard;
73973 @@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
73974 0, 0L, 0,
73975 sock_i_uid(sp), 0,
73976 sock_i_ino(sp),
73977 - atomic_read(&sp->sk_refcnt), sp,
73978 - atomic_read(&sp->sk_drops));
73979 + atomic_read(&sp->sk_refcnt),
73980 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73981 + NULL,
73982 +#else
73983 + sp,
73984 +#endif
73985 + atomic_read_unchecked(&sp->sk_drops));
73986 }
73987
73988 int udp6_seq_show(struct seq_file *seq, void *v)
73989 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
73990 index 253695d..9481ce8 100644
73991 --- a/net/irda/ircomm/ircomm_tty.c
73992 +++ b/net/irda/ircomm/ircomm_tty.c
73993 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73994 add_wait_queue(&self->open_wait, &wait);
73995
73996 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73997 - __FILE__,__LINE__, tty->driver->name, self->open_count );
73998 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73999
74000 /* As far as I can see, we protect open_count - Jean II */
74001 spin_lock_irqsave(&self->spinlock, flags);
74002 if (!tty_hung_up_p(filp)) {
74003 extra_count = 1;
74004 - self->open_count--;
74005 + local_dec(&self->open_count);
74006 }
74007 spin_unlock_irqrestore(&self->spinlock, flags);
74008 - self->blocked_open++;
74009 + local_inc(&self->blocked_open);
74010
74011 while (1) {
74012 if (tty->termios->c_cflag & CBAUD) {
74013 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
74014 }
74015
74016 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
74017 - __FILE__,__LINE__, tty->driver->name, self->open_count );
74018 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
74019
74020 schedule();
74021 }
74022 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
74023 if (extra_count) {
74024 /* ++ is not atomic, so this should be protected - Jean II */
74025 spin_lock_irqsave(&self->spinlock, flags);
74026 - self->open_count++;
74027 + local_inc(&self->open_count);
74028 spin_unlock_irqrestore(&self->spinlock, flags);
74029 }
74030 - self->blocked_open--;
74031 + local_dec(&self->blocked_open);
74032
74033 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
74034 - __FILE__,__LINE__, tty->driver->name, self->open_count);
74035 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
74036
74037 if (!retval)
74038 self->flags |= ASYNC_NORMAL_ACTIVE;
74039 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
74040 }
74041 /* ++ is not atomic, so this should be protected - Jean II */
74042 spin_lock_irqsave(&self->spinlock, flags);
74043 - self->open_count++;
74044 + local_inc(&self->open_count);
74045
74046 tty->driver_data = self;
74047 self->tty = tty;
74048 spin_unlock_irqrestore(&self->spinlock, flags);
74049
74050 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
74051 - self->line, self->open_count);
74052 + self->line, local_read(&self->open_count));
74053
74054 /* Not really used by us, but lets do it anyway */
74055 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
74056 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
74057 return;
74058 }
74059
74060 - if ((tty->count == 1) && (self->open_count != 1)) {
74061 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
74062 /*
74063 * Uh, oh. tty->count is 1, which means that the tty
74064 * structure will be freed. state->count should always
74065 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
74066 */
74067 IRDA_DEBUG(0, "%s(), bad serial port count; "
74068 "tty->count is 1, state->count is %d\n", __func__ ,
74069 - self->open_count);
74070 - self->open_count = 1;
74071 + local_read(&self->open_count));
74072 + local_set(&self->open_count, 1);
74073 }
74074
74075 - if (--self->open_count < 0) {
74076 + if (local_dec_return(&self->open_count) < 0) {
74077 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
74078 - __func__, self->line, self->open_count);
74079 - self->open_count = 0;
74080 + __func__, self->line, local_read(&self->open_count));
74081 + local_set(&self->open_count, 0);
74082 }
74083 - if (self->open_count) {
74084 + if (local_read(&self->open_count)) {
74085 spin_unlock_irqrestore(&self->spinlock, flags);
74086
74087 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
74088 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
74089 tty->closing = 0;
74090 self->tty = NULL;
74091
74092 - if (self->blocked_open) {
74093 + if (local_read(&self->blocked_open)) {
74094 if (self->close_delay)
74095 schedule_timeout_interruptible(self->close_delay);
74096 wake_up_interruptible(&self->open_wait);
74097 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
74098 spin_lock_irqsave(&self->spinlock, flags);
74099 self->flags &= ~ASYNC_NORMAL_ACTIVE;
74100 self->tty = NULL;
74101 - self->open_count = 0;
74102 + local_set(&self->open_count, 0);
74103 spin_unlock_irqrestore(&self->spinlock, flags);
74104
74105 wake_up_interruptible(&self->open_wait);
74106 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
74107 seq_putc(m, '\n');
74108
74109 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
74110 - seq_printf(m, "Open count: %d\n", self->open_count);
74111 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
74112 seq_printf(m, "Max data size: %d\n", self->max_data_size);
74113 seq_printf(m, "Max header size: %d\n", self->max_header_size);
74114
74115 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
74116 index 274d150..656a144 100644
74117 --- a/net/iucv/af_iucv.c
74118 +++ b/net/iucv/af_iucv.c
74119 @@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
74120
74121 write_lock_bh(&iucv_sk_list.lock);
74122
74123 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
74124 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
74125 while (__iucv_get_sock_by_name(name)) {
74126 sprintf(name, "%08x",
74127 - atomic_inc_return(&iucv_sk_list.autobind_name));
74128 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
74129 }
74130
74131 write_unlock_bh(&iucv_sk_list.lock);
74132 diff --git a/net/key/af_key.c b/net/key/af_key.c
74133 index 1e733e9..3d73c9f 100644
74134 --- a/net/key/af_key.c
74135 +++ b/net/key/af_key.c
74136 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
74137 static u32 get_acqseq(void)
74138 {
74139 u32 res;
74140 - static atomic_t acqseq;
74141 + static atomic_unchecked_t acqseq;
74142
74143 do {
74144 - res = atomic_inc_return(&acqseq);
74145 + res = atomic_inc_return_unchecked(&acqseq);
74146 } while (!res);
74147 return res;
74148 }
74149 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
74150 index 73495f1..ad51356 100644
74151 --- a/net/mac80211/ieee80211_i.h
74152 +++ b/net/mac80211/ieee80211_i.h
74153 @@ -27,6 +27,7 @@
74154 #include <net/ieee80211_radiotap.h>
74155 #include <net/cfg80211.h>
74156 #include <net/mac80211.h>
74157 +#include <asm/local.h>
74158 #include "key.h"
74159 #include "sta_info.h"
74160
74161 @@ -764,7 +765,7 @@ struct ieee80211_local {
74162 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
74163 spinlock_t queue_stop_reason_lock;
74164
74165 - int open_count;
74166 + local_t open_count;
74167 int monitors, cooked_mntrs;
74168 /* number of interfaces with corresponding FIF_ flags */
74169 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
74170 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
74171 index 30d7355..e260095 100644
74172 --- a/net/mac80211/iface.c
74173 +++ b/net/mac80211/iface.c
74174 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74175 break;
74176 }
74177
74178 - if (local->open_count == 0) {
74179 + if (local_read(&local->open_count) == 0) {
74180 res = drv_start(local);
74181 if (res)
74182 goto err_del_bss;
74183 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74184 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
74185
74186 if (!is_valid_ether_addr(dev->dev_addr)) {
74187 - if (!local->open_count)
74188 + if (!local_read(&local->open_count))
74189 drv_stop(local);
74190 return -EADDRNOTAVAIL;
74191 }
74192 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74193 mutex_unlock(&local->mtx);
74194
74195 if (coming_up)
74196 - local->open_count++;
74197 + local_inc(&local->open_count);
74198
74199 if (hw_reconf_flags) {
74200 ieee80211_hw_config(local, hw_reconf_flags);
74201 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74202 err_del_interface:
74203 drv_remove_interface(local, &sdata->vif);
74204 err_stop:
74205 - if (!local->open_count)
74206 + if (!local_read(&local->open_count))
74207 drv_stop(local);
74208 err_del_bss:
74209 sdata->bss = NULL;
74210 @@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
74211 }
74212
74213 if (going_down)
74214 - local->open_count--;
74215 + local_dec(&local->open_count);
74216
74217 switch (sdata->vif.type) {
74218 case NL80211_IFTYPE_AP_VLAN:
74219 @@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
74220
74221 ieee80211_recalc_ps(local, -1);
74222
74223 - if (local->open_count == 0) {
74224 + if (local_read(&local->open_count) == 0) {
74225 if (local->ops->napi_poll)
74226 napi_disable(&local->napi);
74227 ieee80211_clear_tx_pending(local);
74228 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
74229 index a7536fd..4039cc0 100644
74230 --- a/net/mac80211/main.c
74231 +++ b/net/mac80211/main.c
74232 @@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
74233 local->hw.conf.power_level = power;
74234 }
74235
74236 - if (changed && local->open_count) {
74237 + if (changed && local_read(&local->open_count)) {
74238 ret = drv_config(local, changed);
74239 /*
74240 * Goal:
74241 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
74242 index 9ee7164..56c5061 100644
74243 --- a/net/mac80211/pm.c
74244 +++ b/net/mac80211/pm.c
74245 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
74246 struct ieee80211_sub_if_data *sdata;
74247 struct sta_info *sta;
74248
74249 - if (!local->open_count)
74250 + if (!local_read(&local->open_count))
74251 goto suspend;
74252
74253 ieee80211_scan_cancel(local);
74254 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
74255 cancel_work_sync(&local->dynamic_ps_enable_work);
74256 del_timer_sync(&local->dynamic_ps_timer);
74257
74258 - local->wowlan = wowlan && local->open_count;
74259 + local->wowlan = wowlan && local_read(&local->open_count);
74260 if (local->wowlan) {
74261 int err = drv_suspend(local, wowlan);
74262 if (err < 0) {
74263 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
74264 }
74265
74266 /* stop hardware - this must stop RX */
74267 - if (local->open_count)
74268 + if (local_read(&local->open_count))
74269 ieee80211_stop_device(local);
74270
74271 suspend:
74272 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
74273 index 5a5a776..9600b11 100644
74274 --- a/net/mac80211/rate.c
74275 +++ b/net/mac80211/rate.c
74276 @@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
74277
74278 ASSERT_RTNL();
74279
74280 - if (local->open_count)
74281 + if (local_read(&local->open_count))
74282 return -EBUSY;
74283
74284 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
74285 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
74286 index c97a065..ff61928 100644
74287 --- a/net/mac80211/rc80211_pid_debugfs.c
74288 +++ b/net/mac80211/rc80211_pid_debugfs.c
74289 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
74290
74291 spin_unlock_irqrestore(&events->lock, status);
74292
74293 - if (copy_to_user(buf, pb, p))
74294 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
74295 return -EFAULT;
74296
74297 return p;
74298 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
74299 index d5230ec..c604b21 100644
74300 --- a/net/mac80211/util.c
74301 +++ b/net/mac80211/util.c
74302 @@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
74303 drv_set_coverage_class(local, hw->wiphy->coverage_class);
74304
74305 /* everything else happens only if HW was up & running */
74306 - if (!local->open_count)
74307 + if (!local_read(&local->open_count))
74308 goto wake_up;
74309
74310 /*
74311 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
74312 index d5597b7..ab6d39c 100644
74313 --- a/net/netfilter/Kconfig
74314 +++ b/net/netfilter/Kconfig
74315 @@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
74316
74317 To compile it as a module, choose M here. If unsure, say N.
74318
74319 +config NETFILTER_XT_MATCH_GRADM
74320 + tristate '"gradm" match support'
74321 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
74322 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
74323 + ---help---
74324 + The gradm match allows to match on grsecurity RBAC being enabled.
74325 + It is useful when iptables rules are applied early on bootup to
74326 + prevent connections to the machine (except from a trusted host)
74327 + while the RBAC system is disabled.
74328 +
74329 config NETFILTER_XT_MATCH_HASHLIMIT
74330 tristate '"hashlimit" match support'
74331 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
74332 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
74333 index 1a02853..5d8c22e 100644
74334 --- a/net/netfilter/Makefile
74335 +++ b/net/netfilter/Makefile
74336 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
74337 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
74338 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
74339 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
74340 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
74341 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
74342 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
74343 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
74344 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
74345 index 29fa5ba..8debc79 100644
74346 --- a/net/netfilter/ipvs/ip_vs_conn.c
74347 +++ b/net/netfilter/ipvs/ip_vs_conn.c
74348 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
74349 /* Increase the refcnt counter of the dest */
74350 atomic_inc(&dest->refcnt);
74351
74352 - conn_flags = atomic_read(&dest->conn_flags);
74353 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
74354 if (cp->protocol != IPPROTO_UDP)
74355 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
74356 /* Bind with the destination and its corresponding transmitter */
74357 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
74358 atomic_set(&cp->refcnt, 1);
74359
74360 atomic_set(&cp->n_control, 0);
74361 - atomic_set(&cp->in_pkts, 0);
74362 + atomic_set_unchecked(&cp->in_pkts, 0);
74363
74364 atomic_inc(&ipvs->conn_count);
74365 if (flags & IP_VS_CONN_F_NO_CPORT)
74366 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
74367
74368 /* Don't drop the entry if its number of incoming packets is not
74369 located in [0, 8] */
74370 - i = atomic_read(&cp->in_pkts);
74371 + i = atomic_read_unchecked(&cp->in_pkts);
74372 if (i > 8 || i < 0) return 0;
74373
74374 if (!todrop_rate[i]) return 0;
74375 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
74376 index 093cc32..9209ae1 100644
74377 --- a/net/netfilter/ipvs/ip_vs_core.c
74378 +++ b/net/netfilter/ipvs/ip_vs_core.c
74379 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
74380 ret = cp->packet_xmit(skb, cp, pd->pp);
74381 /* do not touch skb anymore */
74382
74383 - atomic_inc(&cp->in_pkts);
74384 + atomic_inc_unchecked(&cp->in_pkts);
74385 ip_vs_conn_put(cp);
74386 return ret;
74387 }
74388 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
74389 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
74390 pkts = sysctl_sync_threshold(ipvs);
74391 else
74392 - pkts = atomic_add_return(1, &cp->in_pkts);
74393 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74394
74395 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
74396 cp->protocol == IPPROTO_SCTP) {
74397 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
74398 index e1a66cf..0910076 100644
74399 --- a/net/netfilter/ipvs/ip_vs_ctl.c
74400 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
74401 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
74402 ip_vs_rs_hash(ipvs, dest);
74403 write_unlock_bh(&ipvs->rs_lock);
74404 }
74405 - atomic_set(&dest->conn_flags, conn_flags);
74406 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
74407
74408 /* bind the service */
74409 if (!dest->svc) {
74410 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74411 " %-7s %-6d %-10d %-10d\n",
74412 &dest->addr.in6,
74413 ntohs(dest->port),
74414 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74415 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74416 atomic_read(&dest->weight),
74417 atomic_read(&dest->activeconns),
74418 atomic_read(&dest->inactconns));
74419 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74420 "%-7s %-6d %-10d %-10d\n",
74421 ntohl(dest->addr.ip),
74422 ntohs(dest->port),
74423 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74424 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74425 atomic_read(&dest->weight),
74426 atomic_read(&dest->activeconns),
74427 atomic_read(&dest->inactconns));
74428 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
74429
74430 entry.addr = dest->addr.ip;
74431 entry.port = dest->port;
74432 - entry.conn_flags = atomic_read(&dest->conn_flags);
74433 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
74434 entry.weight = atomic_read(&dest->weight);
74435 entry.u_threshold = dest->u_threshold;
74436 entry.l_threshold = dest->l_threshold;
74437 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
74438 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
74439
74440 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
74441 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74442 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74443 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
74444 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
74445 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
74446 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
74447 index 2b6678c0..aaa41fc 100644
74448 --- a/net/netfilter/ipvs/ip_vs_sync.c
74449 +++ b/net/netfilter/ipvs/ip_vs_sync.c
74450 @@ -649,7 +649,7 @@ control:
74451 * i.e only increment in_pkts for Templates.
74452 */
74453 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
74454 - int pkts = atomic_add_return(1, &cp->in_pkts);
74455 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74456
74457 if (pkts % sysctl_sync_period(ipvs) != 1)
74458 return;
74459 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
74460
74461 if (opt)
74462 memcpy(&cp->in_seq, opt, sizeof(*opt));
74463 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74464 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74465 cp->state = state;
74466 cp->old_state = cp->state;
74467 /*
74468 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
74469 index aa2d720..d8aa111 100644
74470 --- a/net/netfilter/ipvs/ip_vs_xmit.c
74471 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
74472 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
74473 else
74474 rc = NF_ACCEPT;
74475 /* do not touch skb anymore */
74476 - atomic_inc(&cp->in_pkts);
74477 + atomic_inc_unchecked(&cp->in_pkts);
74478 goto out;
74479 }
74480
74481 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
74482 else
74483 rc = NF_ACCEPT;
74484 /* do not touch skb anymore */
74485 - atomic_inc(&cp->in_pkts);
74486 + atomic_inc_unchecked(&cp->in_pkts);
74487 goto out;
74488 }
74489
74490 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
74491 index 66b2c54..c7884e3 100644
74492 --- a/net/netfilter/nfnetlink_log.c
74493 +++ b/net/netfilter/nfnetlink_log.c
74494 @@ -70,7 +70,7 @@ struct nfulnl_instance {
74495 };
74496
74497 static DEFINE_SPINLOCK(instances_lock);
74498 -static atomic_t global_seq;
74499 +static atomic_unchecked_t global_seq;
74500
74501 #define INSTANCE_BUCKETS 16
74502 static struct hlist_head instance_table[INSTANCE_BUCKETS];
74503 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
74504 /* global sequence number */
74505 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
74506 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
74507 - htonl(atomic_inc_return(&global_seq)));
74508 + htonl(atomic_inc_return_unchecked(&global_seq)));
74509
74510 if (data_len) {
74511 struct nlattr *nla;
74512 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
74513 new file mode 100644
74514 index 0000000..6905327
74515 --- /dev/null
74516 +++ b/net/netfilter/xt_gradm.c
74517 @@ -0,0 +1,51 @@
74518 +/*
74519 + * gradm match for netfilter
74520 + * Copyright © Zbigniew Krzystolik, 2010
74521 + *
74522 + * This program is free software; you can redistribute it and/or modify
74523 + * it under the terms of the GNU General Public License; either version
74524 + * 2 or 3 as published by the Free Software Foundation.
74525 + */
74526 +#include <linux/module.h>
74527 +#include <linux/moduleparam.h>
74528 +#include <linux/skbuff.h>
74529 +#include <linux/netfilter/x_tables.h>
74530 +#include <linux/grsecurity.h>
74531 +#include <linux/netfilter/xt_gradm.h>
74532 +
74533 +static bool
74534 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
74535 +{
74536 + const struct xt_gradm_mtinfo *info = par->matchinfo;
74537 + bool retval = false;
74538 + if (gr_acl_is_enabled())
74539 + retval = true;
74540 + return retval ^ info->invflags;
74541 +}
74542 +
74543 +static struct xt_match gradm_mt_reg __read_mostly = {
74544 + .name = "gradm",
74545 + .revision = 0,
74546 + .family = NFPROTO_UNSPEC,
74547 + .match = gradm_mt,
74548 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
74549 + .me = THIS_MODULE,
74550 +};
74551 +
74552 +static int __init gradm_mt_init(void)
74553 +{
74554 + return xt_register_match(&gradm_mt_reg);
74555 +}
74556 +
74557 +static void __exit gradm_mt_exit(void)
74558 +{
74559 + xt_unregister_match(&gradm_mt_reg);
74560 +}
74561 +
74562 +module_init(gradm_mt_init);
74563 +module_exit(gradm_mt_exit);
74564 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
74565 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
74566 +MODULE_LICENSE("GPL");
74567 +MODULE_ALIAS("ipt_gradm");
74568 +MODULE_ALIAS("ip6t_gradm");
74569 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
74570 index 4fe4fb4..87a89e5 100644
74571 --- a/net/netfilter/xt_statistic.c
74572 +++ b/net/netfilter/xt_statistic.c
74573 @@ -19,7 +19,7 @@
74574 #include <linux/module.h>
74575
74576 struct xt_statistic_priv {
74577 - atomic_t count;
74578 + atomic_unchecked_t count;
74579 } ____cacheline_aligned_in_smp;
74580
74581 MODULE_LICENSE("GPL");
74582 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
74583 break;
74584 case XT_STATISTIC_MODE_NTH:
74585 do {
74586 - oval = atomic_read(&info->master->count);
74587 + oval = atomic_read_unchecked(&info->master->count);
74588 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
74589 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
74590 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
74591 if (nval == 0)
74592 ret = !ret;
74593 break;
74594 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
74595 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
74596 if (info->master == NULL)
74597 return -ENOMEM;
74598 - atomic_set(&info->master->count, info->u.nth.count);
74599 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
74600
74601 return 0;
74602 }
74603 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
74604 index 1201b6d..bcff8c6 100644
74605 --- a/net/netlink/af_netlink.c
74606 +++ b/net/netlink/af_netlink.c
74607 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock *sk)
74608 sk->sk_error_report(sk);
74609 }
74610 }
74611 - atomic_inc(&sk->sk_drops);
74612 + atomic_inc_unchecked(&sk->sk_drops);
74613 }
74614
74615 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
74616 @@ -1999,7 +1999,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
74617 sk_wmem_alloc_get(s),
74618 nlk->cb,
74619 atomic_read(&s->sk_refcnt),
74620 - atomic_read(&s->sk_drops),
74621 + atomic_read_unchecked(&s->sk_drops),
74622 sock_i_ino(s)
74623 );
74624
74625 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
74626 index 732152f..60bb09e 100644
74627 --- a/net/netrom/af_netrom.c
74628 +++ b/net/netrom/af_netrom.c
74629 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
74630 struct sock *sk = sock->sk;
74631 struct nr_sock *nr = nr_sk(sk);
74632
74633 + memset(sax, 0, sizeof(*sax));
74634 lock_sock(sk);
74635 if (peer != 0) {
74636 if (sk->sk_state != TCP_ESTABLISHED) {
74637 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
74638 *uaddr_len = sizeof(struct full_sockaddr_ax25);
74639 } else {
74640 sax->fsa_ax25.sax25_family = AF_NETROM;
74641 - sax->fsa_ax25.sax25_ndigis = 0;
74642 sax->fsa_ax25.sax25_call = nr->source_addr;
74643 *uaddr_len = sizeof(struct sockaddr_ax25);
74644 }
74645 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
74646 index d9d4970..d5a6a68 100644
74647 --- a/net/packet/af_packet.c
74648 +++ b/net/packet/af_packet.c
74649 @@ -1675,7 +1675,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
74650
74651 spin_lock(&sk->sk_receive_queue.lock);
74652 po->stats.tp_packets++;
74653 - skb->dropcount = atomic_read(&sk->sk_drops);
74654 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74655 __skb_queue_tail(&sk->sk_receive_queue, skb);
74656 spin_unlock(&sk->sk_receive_queue.lock);
74657 sk->sk_data_ready(sk, skb->len);
74658 @@ -1684,7 +1684,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
74659 drop_n_acct:
74660 spin_lock(&sk->sk_receive_queue.lock);
74661 po->stats.tp_drops++;
74662 - atomic_inc(&sk->sk_drops);
74663 + atomic_inc_unchecked(&sk->sk_drops);
74664 spin_unlock(&sk->sk_receive_queue.lock);
74665
74666 drop_n_restore:
74667 @@ -3266,7 +3266,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
74668 case PACKET_HDRLEN:
74669 if (len > sizeof(int))
74670 len = sizeof(int);
74671 - if (copy_from_user(&val, optval, len))
74672 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
74673 return -EFAULT;
74674 switch (val) {
74675 case TPACKET_V1:
74676 @@ -3316,7 +3316,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
74677
74678 if (put_user(len, optlen))
74679 return -EFAULT;
74680 - if (copy_to_user(optval, data, len))
74681 + if (len > sizeof(st) || copy_to_user(optval, data, len))
74682 return -EFAULT;
74683 return 0;
74684 }
74685 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
74686 index d65f699..05aa6ce 100644
74687 --- a/net/phonet/af_phonet.c
74688 +++ b/net/phonet/af_phonet.c
74689 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
74690 {
74691 struct phonet_protocol *pp;
74692
74693 - if (protocol >= PHONET_NPROTO)
74694 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74695 return NULL;
74696
74697 rcu_read_lock();
74698 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
74699 {
74700 int err = 0;
74701
74702 - if (protocol >= PHONET_NPROTO)
74703 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74704 return -EINVAL;
74705
74706 err = proto_register(pp->prot, 1);
74707 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
74708 index 2ba6e9f..409573f 100644
74709 --- a/net/phonet/pep.c
74710 +++ b/net/phonet/pep.c
74711 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
74712
74713 case PNS_PEP_CTRL_REQ:
74714 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
74715 - atomic_inc(&sk->sk_drops);
74716 + atomic_inc_unchecked(&sk->sk_drops);
74717 break;
74718 }
74719 __skb_pull(skb, 4);
74720 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
74721 }
74722
74723 if (pn->rx_credits == 0) {
74724 - atomic_inc(&sk->sk_drops);
74725 + atomic_inc_unchecked(&sk->sk_drops);
74726 err = -ENOBUFS;
74727 break;
74728 }
74729 @@ -557,7 +557,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
74730 }
74731
74732 if (pn->rx_credits == 0) {
74733 - atomic_inc(&sk->sk_drops);
74734 + atomic_inc_unchecked(&sk->sk_drops);
74735 err = NET_RX_DROP;
74736 break;
74737 }
74738 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
74739 index 4c7eff3..59c727f 100644
74740 --- a/net/phonet/socket.c
74741 +++ b/net/phonet/socket.c
74742 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
74743 pn->resource, sk->sk_state,
74744 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
74745 sock_i_uid(sk), sock_i_ino(sk),
74746 - atomic_read(&sk->sk_refcnt), sk,
74747 - atomic_read(&sk->sk_drops), &len);
74748 + atomic_read(&sk->sk_refcnt),
74749 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74750 + NULL,
74751 +#else
74752 + sk,
74753 +#endif
74754 + atomic_read_unchecked(&sk->sk_drops), &len);
74755 }
74756 seq_printf(seq, "%*s\n", 127 - len, "");
74757 return 0;
74758 diff --git a/net/rds/cong.c b/net/rds/cong.c
74759 index e5b65ac..f3b6fb7 100644
74760 --- a/net/rds/cong.c
74761 +++ b/net/rds/cong.c
74762 @@ -78,7 +78,7 @@
74763 * finds that the saved generation number is smaller than the global generation
74764 * number, it wakes up the process.
74765 */
74766 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
74767 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
74768
74769 /*
74770 * Congestion monitoring
74771 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
74772 rdsdebug("waking map %p for %pI4\n",
74773 map, &map->m_addr);
74774 rds_stats_inc(s_cong_update_received);
74775 - atomic_inc(&rds_cong_generation);
74776 + atomic_inc_unchecked(&rds_cong_generation);
74777 if (waitqueue_active(&map->m_waitq))
74778 wake_up(&map->m_waitq);
74779 if (waitqueue_active(&rds_poll_waitq))
74780 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
74781
74782 int rds_cong_updated_since(unsigned long *recent)
74783 {
74784 - unsigned long gen = atomic_read(&rds_cong_generation);
74785 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
74786
74787 if (likely(*recent == gen))
74788 return 0;
74789 diff --git a/net/rds/ib.h b/net/rds/ib.h
74790 index edfaaaf..8c89879 100644
74791 --- a/net/rds/ib.h
74792 +++ b/net/rds/ib.h
74793 @@ -128,7 +128,7 @@ struct rds_ib_connection {
74794 /* sending acks */
74795 unsigned long i_ack_flags;
74796 #ifdef KERNEL_HAS_ATOMIC64
74797 - atomic64_t i_ack_next; /* next ACK to send */
74798 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74799 #else
74800 spinlock_t i_ack_lock; /* protect i_ack_next */
74801 u64 i_ack_next; /* next ACK to send */
74802 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
74803 index 51c8689..36c555f 100644
74804 --- a/net/rds/ib_cm.c
74805 +++ b/net/rds/ib_cm.c
74806 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
74807 /* Clear the ACK state */
74808 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74809 #ifdef KERNEL_HAS_ATOMIC64
74810 - atomic64_set(&ic->i_ack_next, 0);
74811 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74812 #else
74813 ic->i_ack_next = 0;
74814 #endif
74815 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
74816 index e29e0ca..fa3a6a3 100644
74817 --- a/net/rds/ib_recv.c
74818 +++ b/net/rds/ib_recv.c
74819 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
74820 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
74821 int ack_required)
74822 {
74823 - atomic64_set(&ic->i_ack_next, seq);
74824 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74825 if (ack_required) {
74826 smp_mb__before_clear_bit();
74827 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74828 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
74829 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74830 smp_mb__after_clear_bit();
74831
74832 - return atomic64_read(&ic->i_ack_next);
74833 + return atomic64_read_unchecked(&ic->i_ack_next);
74834 }
74835 #endif
74836
74837 diff --git a/net/rds/iw.h b/net/rds/iw.h
74838 index 04ce3b1..48119a6 100644
74839 --- a/net/rds/iw.h
74840 +++ b/net/rds/iw.h
74841 @@ -134,7 +134,7 @@ struct rds_iw_connection {
74842 /* sending acks */
74843 unsigned long i_ack_flags;
74844 #ifdef KERNEL_HAS_ATOMIC64
74845 - atomic64_t i_ack_next; /* next ACK to send */
74846 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74847 #else
74848 spinlock_t i_ack_lock; /* protect i_ack_next */
74849 u64 i_ack_next; /* next ACK to send */
74850 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
74851 index 9556d28..f046d0e 100644
74852 --- a/net/rds/iw_cm.c
74853 +++ b/net/rds/iw_cm.c
74854 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
74855 /* Clear the ACK state */
74856 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74857 #ifdef KERNEL_HAS_ATOMIC64
74858 - atomic64_set(&ic->i_ack_next, 0);
74859 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74860 #else
74861 ic->i_ack_next = 0;
74862 #endif
74863 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
74864 index 5e57347..3916042 100644
74865 --- a/net/rds/iw_recv.c
74866 +++ b/net/rds/iw_recv.c
74867 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
74868 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
74869 int ack_required)
74870 {
74871 - atomic64_set(&ic->i_ack_next, seq);
74872 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74873 if (ack_required) {
74874 smp_mb__before_clear_bit();
74875 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74876 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
74877 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74878 smp_mb__after_clear_bit();
74879
74880 - return atomic64_read(&ic->i_ack_next);
74881 + return atomic64_read_unchecked(&ic->i_ack_next);
74882 }
74883 #endif
74884
74885 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
74886 index edac9ef..16bcb98 100644
74887 --- a/net/rds/tcp.c
74888 +++ b/net/rds/tcp.c
74889 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
74890 int val = 1;
74891
74892 set_fs(KERNEL_DS);
74893 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
74894 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
74895 sizeof(val));
74896 set_fs(oldfs);
74897 }
74898 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
74899 index 1b4fd68..2234175 100644
74900 --- a/net/rds/tcp_send.c
74901 +++ b/net/rds/tcp_send.c
74902 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
74903
74904 oldfs = get_fs();
74905 set_fs(KERNEL_DS);
74906 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
74907 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
74908 sizeof(val));
74909 set_fs(oldfs);
74910 }
74911 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
74912 index 74c064c..fdec26f 100644
74913 --- a/net/rxrpc/af_rxrpc.c
74914 +++ b/net/rxrpc/af_rxrpc.c
74915 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
74916 __be32 rxrpc_epoch;
74917
74918 /* current debugging ID */
74919 -atomic_t rxrpc_debug_id;
74920 +atomic_unchecked_t rxrpc_debug_id;
74921
74922 /* count of skbs currently in use */
74923 atomic_t rxrpc_n_skbs;
74924 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
74925 index f99cfce..cc529dd 100644
74926 --- a/net/rxrpc/ar-ack.c
74927 +++ b/net/rxrpc/ar-ack.c
74928 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
74929
74930 _enter("{%d,%d,%d,%d},",
74931 call->acks_hard, call->acks_unacked,
74932 - atomic_read(&call->sequence),
74933 + atomic_read_unchecked(&call->sequence),
74934 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
74935
74936 stop = 0;
74937 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
74938
74939 /* each Tx packet has a new serial number */
74940 sp->hdr.serial =
74941 - htonl(atomic_inc_return(&call->conn->serial));
74942 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
74943
74944 hdr = (struct rxrpc_header *) txb->head;
74945 hdr->serial = sp->hdr.serial;
74946 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
74947 */
74948 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
74949 {
74950 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
74951 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
74952 }
74953
74954 /*
74955 @@ -629,7 +629,7 @@ process_further:
74956
74957 latest = ntohl(sp->hdr.serial);
74958 hard = ntohl(ack.firstPacket);
74959 - tx = atomic_read(&call->sequence);
74960 + tx = atomic_read_unchecked(&call->sequence);
74961
74962 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74963 latest,
74964 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
74965 goto maybe_reschedule;
74966
74967 send_ACK_with_skew:
74968 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
74969 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
74970 ntohl(ack.serial));
74971 send_ACK:
74972 mtu = call->conn->trans->peer->if_mtu;
74973 @@ -1173,7 +1173,7 @@ send_ACK:
74974 ackinfo.rxMTU = htonl(5692);
74975 ackinfo.jumbo_max = htonl(4);
74976
74977 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74978 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74979 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74980 ntohl(hdr.serial),
74981 ntohs(ack.maxSkew),
74982 @@ -1191,7 +1191,7 @@ send_ACK:
74983 send_message:
74984 _debug("send message");
74985
74986 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74987 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74988 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
74989 send_message_2:
74990
74991 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
74992 index bf656c2..48f9d27 100644
74993 --- a/net/rxrpc/ar-call.c
74994 +++ b/net/rxrpc/ar-call.c
74995 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
74996 spin_lock_init(&call->lock);
74997 rwlock_init(&call->state_lock);
74998 atomic_set(&call->usage, 1);
74999 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
75000 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75001 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
75002
75003 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
75004 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
75005 index 4106ca9..a338d7a 100644
75006 --- a/net/rxrpc/ar-connection.c
75007 +++ b/net/rxrpc/ar-connection.c
75008 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
75009 rwlock_init(&conn->lock);
75010 spin_lock_init(&conn->state_lock);
75011 atomic_set(&conn->usage, 1);
75012 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
75013 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75014 conn->avail_calls = RXRPC_MAXCALLS;
75015 conn->size_align = 4;
75016 conn->header_size = sizeof(struct rxrpc_header);
75017 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
75018 index e7ed43a..6afa140 100644
75019 --- a/net/rxrpc/ar-connevent.c
75020 +++ b/net/rxrpc/ar-connevent.c
75021 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
75022
75023 len = iov[0].iov_len + iov[1].iov_len;
75024
75025 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
75026 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
75027 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
75028
75029 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
75030 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
75031 index 1a2b0633..e8d1382 100644
75032 --- a/net/rxrpc/ar-input.c
75033 +++ b/net/rxrpc/ar-input.c
75034 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
75035 /* track the latest serial number on this connection for ACK packet
75036 * information */
75037 serial = ntohl(sp->hdr.serial);
75038 - hi_serial = atomic_read(&call->conn->hi_serial);
75039 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
75040 while (serial > hi_serial)
75041 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
75042 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
75043 serial);
75044
75045 /* request ACK generation for any ACK or DATA packet that requests
75046 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
75047 index 8e22bd3..f66d1c0 100644
75048 --- a/net/rxrpc/ar-internal.h
75049 +++ b/net/rxrpc/ar-internal.h
75050 @@ -272,8 +272,8 @@ struct rxrpc_connection {
75051 int error; /* error code for local abort */
75052 int debug_id; /* debug ID for printks */
75053 unsigned call_counter; /* call ID counter */
75054 - atomic_t serial; /* packet serial number counter */
75055 - atomic_t hi_serial; /* highest serial number received */
75056 + atomic_unchecked_t serial; /* packet serial number counter */
75057 + atomic_unchecked_t hi_serial; /* highest serial number received */
75058 u8 avail_calls; /* number of calls available */
75059 u8 size_align; /* data size alignment (for security) */
75060 u8 header_size; /* rxrpc + security header size */
75061 @@ -346,7 +346,7 @@ struct rxrpc_call {
75062 spinlock_t lock;
75063 rwlock_t state_lock; /* lock for state transition */
75064 atomic_t usage;
75065 - atomic_t sequence; /* Tx data packet sequence counter */
75066 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
75067 u32 abort_code; /* local/remote abort code */
75068 enum { /* current state of call */
75069 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
75070 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
75071 */
75072 extern atomic_t rxrpc_n_skbs;
75073 extern __be32 rxrpc_epoch;
75074 -extern atomic_t rxrpc_debug_id;
75075 +extern atomic_unchecked_t rxrpc_debug_id;
75076 extern struct workqueue_struct *rxrpc_workqueue;
75077
75078 /*
75079 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
75080 index 87f7135..74d3703 100644
75081 --- a/net/rxrpc/ar-local.c
75082 +++ b/net/rxrpc/ar-local.c
75083 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
75084 spin_lock_init(&local->lock);
75085 rwlock_init(&local->services_lock);
75086 atomic_set(&local->usage, 1);
75087 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
75088 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75089 memcpy(&local->srx, srx, sizeof(*srx));
75090 }
75091
75092 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
75093 index 338d793..47391d0 100644
75094 --- a/net/rxrpc/ar-output.c
75095 +++ b/net/rxrpc/ar-output.c
75096 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
75097 sp->hdr.cid = call->cid;
75098 sp->hdr.callNumber = call->call_id;
75099 sp->hdr.seq =
75100 - htonl(atomic_inc_return(&call->sequence));
75101 + htonl(atomic_inc_return_unchecked(&call->sequence));
75102 sp->hdr.serial =
75103 - htonl(atomic_inc_return(&conn->serial));
75104 + htonl(atomic_inc_return_unchecked(&conn->serial));
75105 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
75106 sp->hdr.userStatus = 0;
75107 sp->hdr.securityIndex = conn->security_ix;
75108 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
75109 index 2754f09..b20e38f 100644
75110 --- a/net/rxrpc/ar-peer.c
75111 +++ b/net/rxrpc/ar-peer.c
75112 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
75113 INIT_LIST_HEAD(&peer->error_targets);
75114 spin_lock_init(&peer->lock);
75115 atomic_set(&peer->usage, 1);
75116 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
75117 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75118 memcpy(&peer->srx, srx, sizeof(*srx));
75119
75120 rxrpc_assess_MTU_size(peer);
75121 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
75122 index 38047f7..9f48511 100644
75123 --- a/net/rxrpc/ar-proc.c
75124 +++ b/net/rxrpc/ar-proc.c
75125 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
75126 atomic_read(&conn->usage),
75127 rxrpc_conn_states[conn->state],
75128 key_serial(conn->key),
75129 - atomic_read(&conn->serial),
75130 - atomic_read(&conn->hi_serial));
75131 + atomic_read_unchecked(&conn->serial),
75132 + atomic_read_unchecked(&conn->hi_serial));
75133
75134 return 0;
75135 }
75136 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
75137 index 92df566..87ec1bf 100644
75138 --- a/net/rxrpc/ar-transport.c
75139 +++ b/net/rxrpc/ar-transport.c
75140 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
75141 spin_lock_init(&trans->client_lock);
75142 rwlock_init(&trans->conn_lock);
75143 atomic_set(&trans->usage, 1);
75144 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
75145 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75146
75147 if (peer->srx.transport.family == AF_INET) {
75148 switch (peer->srx.transport_type) {
75149 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
75150 index 7635107..4670276 100644
75151 --- a/net/rxrpc/rxkad.c
75152 +++ b/net/rxrpc/rxkad.c
75153 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
75154
75155 len = iov[0].iov_len + iov[1].iov_len;
75156
75157 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
75158 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
75159 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
75160
75161 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
75162 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
75163
75164 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
75165
75166 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
75167 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
75168 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
75169
75170 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
75171 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
75172 index 1e2eee8..ce3967e 100644
75173 --- a/net/sctp/proc.c
75174 +++ b/net/sctp/proc.c
75175 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
75176 seq_printf(seq,
75177 "%8pK %8pK %-3d %-3d %-2d %-4d "
75178 "%4d %8d %8d %7d %5lu %-5d %5d ",
75179 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
75180 + assoc, sk,
75181 + sctp_sk(sk)->type, sk->sk_state,
75182 assoc->state, hash,
75183 assoc->assoc_id,
75184 assoc->sndbuf_used,
75185 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
75186 index 54a7cd2..944edae 100644
75187 --- a/net/sctp/socket.c
75188 +++ b/net/sctp/socket.c
75189 @@ -4574,7 +4574,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
75190 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
75191 if (space_left < addrlen)
75192 return -ENOMEM;
75193 - if (copy_to_user(to, &temp, addrlen))
75194 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
75195 return -EFAULT;
75196 to += addrlen;
75197 cnt++;
75198 diff --git a/net/socket.c b/net/socket.c
75199 index 2dce67a..1e91168 100644
75200 --- a/net/socket.c
75201 +++ b/net/socket.c
75202 @@ -88,6 +88,7 @@
75203 #include <linux/nsproxy.h>
75204 #include <linux/magic.h>
75205 #include <linux/slab.h>
75206 +#include <linux/in.h>
75207
75208 #include <asm/uaccess.h>
75209 #include <asm/unistd.h>
75210 @@ -105,6 +106,8 @@
75211 #include <linux/sockios.h>
75212 #include <linux/atalk.h>
75213
75214 +#include <linux/grsock.h>
75215 +
75216 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
75217 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
75218 unsigned long nr_segs, loff_t pos);
75219 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
75220 &sockfs_dentry_operations, SOCKFS_MAGIC);
75221 }
75222
75223 -static struct vfsmount *sock_mnt __read_mostly;
75224 +struct vfsmount *sock_mnt __read_mostly;
75225
75226 static struct file_system_type sock_fs_type = {
75227 .name = "sockfs",
75228 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
75229 return -EAFNOSUPPORT;
75230 if (type < 0 || type >= SOCK_MAX)
75231 return -EINVAL;
75232 + if (protocol < 0)
75233 + return -EINVAL;
75234
75235 /* Compatibility.
75236
75237 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
75238 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
75239 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
75240
75241 + if(!gr_search_socket(family, type, protocol)) {
75242 + retval = -EACCES;
75243 + goto out;
75244 + }
75245 +
75246 + if (gr_handle_sock_all(family, type, protocol)) {
75247 + retval = -EACCES;
75248 + goto out;
75249 + }
75250 +
75251 retval = sock_create(family, type, protocol, &sock);
75252 if (retval < 0)
75253 goto out;
75254 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
75255 if (sock) {
75256 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
75257 if (err >= 0) {
75258 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
75259 + err = -EACCES;
75260 + goto error;
75261 + }
75262 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
75263 + if (err)
75264 + goto error;
75265 +
75266 err = security_socket_bind(sock,
75267 (struct sockaddr *)&address,
75268 addrlen);
75269 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
75270 (struct sockaddr *)
75271 &address, addrlen);
75272 }
75273 +error:
75274 fput_light(sock->file, fput_needed);
75275 }
75276 return err;
75277 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
75278 if ((unsigned)backlog > somaxconn)
75279 backlog = somaxconn;
75280
75281 + if (gr_handle_sock_server_other(sock->sk)) {
75282 + err = -EPERM;
75283 + goto error;
75284 + }
75285 +
75286 + err = gr_search_listen(sock);
75287 + if (err)
75288 + goto error;
75289 +
75290 err = security_socket_listen(sock, backlog);
75291 if (!err)
75292 err = sock->ops->listen(sock, backlog);
75293
75294 +error:
75295 fput_light(sock->file, fput_needed);
75296 }
75297 return err;
75298 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
75299 newsock->type = sock->type;
75300 newsock->ops = sock->ops;
75301
75302 + if (gr_handle_sock_server_other(sock->sk)) {
75303 + err = -EPERM;
75304 + sock_release(newsock);
75305 + goto out_put;
75306 + }
75307 +
75308 + err = gr_search_accept(sock);
75309 + if (err) {
75310 + sock_release(newsock);
75311 + goto out_put;
75312 + }
75313 +
75314 /*
75315 * We don't need try_module_get here, as the listening socket (sock)
75316 * has the protocol module (sock->ops->owner) held.
75317 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
75318 fd_install(newfd, newfile);
75319 err = newfd;
75320
75321 + gr_attach_curr_ip(newsock->sk);
75322 +
75323 out_put:
75324 fput_light(sock->file, fput_needed);
75325 out:
75326 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
75327 int, addrlen)
75328 {
75329 struct socket *sock;
75330 + struct sockaddr *sck;
75331 struct sockaddr_storage address;
75332 int err, fput_needed;
75333
75334 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
75335 if (err < 0)
75336 goto out_put;
75337
75338 + sck = (struct sockaddr *)&address;
75339 +
75340 + if (gr_handle_sock_client(sck)) {
75341 + err = -EACCES;
75342 + goto out_put;
75343 + }
75344 +
75345 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
75346 + if (err)
75347 + goto out_put;
75348 +
75349 err =
75350 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
75351 if (err)
75352 @@ -1950,7 +2010,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
75353 * checking falls down on this.
75354 */
75355 if (copy_from_user(ctl_buf,
75356 - (void __user __force *)msg_sys->msg_control,
75357 + (void __force_user *)msg_sys->msg_control,
75358 ctl_len))
75359 goto out_freectl;
75360 msg_sys->msg_control = ctl_buf;
75361 @@ -2120,7 +2180,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
75362 * kernel msghdr to use the kernel address space)
75363 */
75364
75365 - uaddr = (__force void __user *)msg_sys->msg_name;
75366 + uaddr = (void __force_user *)msg_sys->msg_name;
75367 uaddr_len = COMPAT_NAMELEN(msg);
75368 if (MSG_CMSG_COMPAT & flags) {
75369 err = verify_compat_iovec(msg_sys, iov,
75370 @@ -2748,7 +2808,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
75371 }
75372
75373 ifr = compat_alloc_user_space(buf_size);
75374 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
75375 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
75376
75377 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
75378 return -EFAULT;
75379 @@ -2772,12 +2832,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
75380 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
75381
75382 if (copy_in_user(rxnfc, compat_rxnfc,
75383 - (void *)(&rxnfc->fs.m_ext + 1) -
75384 - (void *)rxnfc) ||
75385 + (void __user *)(&rxnfc->fs.m_ext + 1) -
75386 + (void __user *)rxnfc) ||
75387 copy_in_user(&rxnfc->fs.ring_cookie,
75388 &compat_rxnfc->fs.ring_cookie,
75389 - (void *)(&rxnfc->fs.location + 1) -
75390 - (void *)&rxnfc->fs.ring_cookie) ||
75391 + (void __user *)(&rxnfc->fs.location + 1) -
75392 + (void __user *)&rxnfc->fs.ring_cookie) ||
75393 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
75394 sizeof(rxnfc->rule_cnt)))
75395 return -EFAULT;
75396 @@ -2789,12 +2849,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
75397
75398 if (convert_out) {
75399 if (copy_in_user(compat_rxnfc, rxnfc,
75400 - (const void *)(&rxnfc->fs.m_ext + 1) -
75401 - (const void *)rxnfc) ||
75402 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
75403 + (const void __user *)rxnfc) ||
75404 copy_in_user(&compat_rxnfc->fs.ring_cookie,
75405 &rxnfc->fs.ring_cookie,
75406 - (const void *)(&rxnfc->fs.location + 1) -
75407 - (const void *)&rxnfc->fs.ring_cookie) ||
75408 + (const void __user *)(&rxnfc->fs.location + 1) -
75409 + (const void __user *)&rxnfc->fs.ring_cookie) ||
75410 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
75411 sizeof(rxnfc->rule_cnt)))
75412 return -EFAULT;
75413 @@ -2864,7 +2924,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
75414 old_fs = get_fs();
75415 set_fs(KERNEL_DS);
75416 err = dev_ioctl(net, cmd,
75417 - (struct ifreq __user __force *) &kifr);
75418 + (struct ifreq __force_user *) &kifr);
75419 set_fs(old_fs);
75420
75421 return err;
75422 @@ -2973,7 +3033,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
75423
75424 old_fs = get_fs();
75425 set_fs(KERNEL_DS);
75426 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
75427 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
75428 set_fs(old_fs);
75429
75430 if (cmd == SIOCGIFMAP && !err) {
75431 @@ -3078,7 +3138,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
75432 ret |= __get_user(rtdev, &(ur4->rt_dev));
75433 if (rtdev) {
75434 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
75435 - r4.rt_dev = (char __user __force *)devname;
75436 + r4.rt_dev = (char __force_user *)devname;
75437 devname[15] = 0;
75438 } else
75439 r4.rt_dev = NULL;
75440 @@ -3318,8 +3378,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
75441 int __user *uoptlen;
75442 int err;
75443
75444 - uoptval = (char __user __force *) optval;
75445 - uoptlen = (int __user __force *) optlen;
75446 + uoptval = (char __force_user *) optval;
75447 + uoptlen = (int __force_user *) optlen;
75448
75449 set_fs(KERNEL_DS);
75450 if (level == SOL_SOCKET)
75451 @@ -3339,7 +3399,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
75452 char __user *uoptval;
75453 int err;
75454
75455 - uoptval = (char __user __force *) optval;
75456 + uoptval = (char __force_user *) optval;
75457
75458 set_fs(KERNEL_DS);
75459 if (level == SOL_SOCKET)
75460 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
75461 index 00a1a2a..6a0138a 100644
75462 --- a/net/sunrpc/sched.c
75463 +++ b/net/sunrpc/sched.c
75464 @@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *word)
75465 #ifdef RPC_DEBUG
75466 static void rpc_task_set_debuginfo(struct rpc_task *task)
75467 {
75468 - static atomic_t rpc_pid;
75469 + static atomic_unchecked_t rpc_pid;
75470
75471 - task->tk_pid = atomic_inc_return(&rpc_pid);
75472 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
75473 }
75474 #else
75475 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
75476 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
75477 index 71bed1c..5dff36d 100644
75478 --- a/net/sunrpc/svcsock.c
75479 +++ b/net/sunrpc/svcsock.c
75480 @@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
75481 int buflen, unsigned int base)
75482 {
75483 size_t save_iovlen;
75484 - void __user *save_iovbase;
75485 + void *save_iovbase;
75486 unsigned int i;
75487 int ret;
75488
75489 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
75490 index 09af4fa..77110a9 100644
75491 --- a/net/sunrpc/xprtrdma/svc_rdma.c
75492 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
75493 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
75494 static unsigned int min_max_inline = 4096;
75495 static unsigned int max_max_inline = 65536;
75496
75497 -atomic_t rdma_stat_recv;
75498 -atomic_t rdma_stat_read;
75499 -atomic_t rdma_stat_write;
75500 -atomic_t rdma_stat_sq_starve;
75501 -atomic_t rdma_stat_rq_starve;
75502 -atomic_t rdma_stat_rq_poll;
75503 -atomic_t rdma_stat_rq_prod;
75504 -atomic_t rdma_stat_sq_poll;
75505 -atomic_t rdma_stat_sq_prod;
75506 +atomic_unchecked_t rdma_stat_recv;
75507 +atomic_unchecked_t rdma_stat_read;
75508 +atomic_unchecked_t rdma_stat_write;
75509 +atomic_unchecked_t rdma_stat_sq_starve;
75510 +atomic_unchecked_t rdma_stat_rq_starve;
75511 +atomic_unchecked_t rdma_stat_rq_poll;
75512 +atomic_unchecked_t rdma_stat_rq_prod;
75513 +atomic_unchecked_t rdma_stat_sq_poll;
75514 +atomic_unchecked_t rdma_stat_sq_prod;
75515
75516 /* Temporary NFS request map and context caches */
75517 struct kmem_cache *svc_rdma_map_cachep;
75518 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
75519 len -= *ppos;
75520 if (len > *lenp)
75521 len = *lenp;
75522 - if (len && copy_to_user(buffer, str_buf, len))
75523 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
75524 return -EFAULT;
75525 *lenp = len;
75526 *ppos += len;
75527 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
75528 {
75529 .procname = "rdma_stat_read",
75530 .data = &rdma_stat_read,
75531 - .maxlen = sizeof(atomic_t),
75532 + .maxlen = sizeof(atomic_unchecked_t),
75533 .mode = 0644,
75534 .proc_handler = read_reset_stat,
75535 },
75536 {
75537 .procname = "rdma_stat_recv",
75538 .data = &rdma_stat_recv,
75539 - .maxlen = sizeof(atomic_t),
75540 + .maxlen = sizeof(atomic_unchecked_t),
75541 .mode = 0644,
75542 .proc_handler = read_reset_stat,
75543 },
75544 {
75545 .procname = "rdma_stat_write",
75546 .data = &rdma_stat_write,
75547 - .maxlen = sizeof(atomic_t),
75548 + .maxlen = sizeof(atomic_unchecked_t),
75549 .mode = 0644,
75550 .proc_handler = read_reset_stat,
75551 },
75552 {
75553 .procname = "rdma_stat_sq_starve",
75554 .data = &rdma_stat_sq_starve,
75555 - .maxlen = sizeof(atomic_t),
75556 + .maxlen = sizeof(atomic_unchecked_t),
75557 .mode = 0644,
75558 .proc_handler = read_reset_stat,
75559 },
75560 {
75561 .procname = "rdma_stat_rq_starve",
75562 .data = &rdma_stat_rq_starve,
75563 - .maxlen = sizeof(atomic_t),
75564 + .maxlen = sizeof(atomic_unchecked_t),
75565 .mode = 0644,
75566 .proc_handler = read_reset_stat,
75567 },
75568 {
75569 .procname = "rdma_stat_rq_poll",
75570 .data = &rdma_stat_rq_poll,
75571 - .maxlen = sizeof(atomic_t),
75572 + .maxlen = sizeof(atomic_unchecked_t),
75573 .mode = 0644,
75574 .proc_handler = read_reset_stat,
75575 },
75576 {
75577 .procname = "rdma_stat_rq_prod",
75578 .data = &rdma_stat_rq_prod,
75579 - .maxlen = sizeof(atomic_t),
75580 + .maxlen = sizeof(atomic_unchecked_t),
75581 .mode = 0644,
75582 .proc_handler = read_reset_stat,
75583 },
75584 {
75585 .procname = "rdma_stat_sq_poll",
75586 .data = &rdma_stat_sq_poll,
75587 - .maxlen = sizeof(atomic_t),
75588 + .maxlen = sizeof(atomic_unchecked_t),
75589 .mode = 0644,
75590 .proc_handler = read_reset_stat,
75591 },
75592 {
75593 .procname = "rdma_stat_sq_prod",
75594 .data = &rdma_stat_sq_prod,
75595 - .maxlen = sizeof(atomic_t),
75596 + .maxlen = sizeof(atomic_unchecked_t),
75597 .mode = 0644,
75598 .proc_handler = read_reset_stat,
75599 },
75600 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75601 index df67211..c354b13 100644
75602 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75603 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75604 @@ -499,7 +499,7 @@ next_sge:
75605 svc_rdma_put_context(ctxt, 0);
75606 goto out;
75607 }
75608 - atomic_inc(&rdma_stat_read);
75609 + atomic_inc_unchecked(&rdma_stat_read);
75610
75611 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
75612 chl_map->ch[ch_no].count -= read_wr.num_sge;
75613 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
75614 dto_q);
75615 list_del_init(&ctxt->dto_q);
75616 } else {
75617 - atomic_inc(&rdma_stat_rq_starve);
75618 + atomic_inc_unchecked(&rdma_stat_rq_starve);
75619 clear_bit(XPT_DATA, &xprt->xpt_flags);
75620 ctxt = NULL;
75621 }
75622 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
75623 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
75624 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
75625 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
75626 - atomic_inc(&rdma_stat_recv);
75627 + atomic_inc_unchecked(&rdma_stat_recv);
75628
75629 /* Build up the XDR from the receive buffers. */
75630 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
75631 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75632 index 249a835..fb2794b 100644
75633 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75634 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75635 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
75636 write_wr.wr.rdma.remote_addr = to;
75637
75638 /* Post It */
75639 - atomic_inc(&rdma_stat_write);
75640 + atomic_inc_unchecked(&rdma_stat_write);
75641 if (svc_rdma_send(xprt, &write_wr))
75642 goto err;
75643 return 0;
75644 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
75645 index ba1296d..0fec1a5 100644
75646 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
75647 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
75648 @@ -300,7 +300,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
75649 return;
75650
75651 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
75652 - atomic_inc(&rdma_stat_rq_poll);
75653 + atomic_inc_unchecked(&rdma_stat_rq_poll);
75654
75655 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
75656 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
75657 @@ -322,7 +322,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
75658 }
75659
75660 if (ctxt)
75661 - atomic_inc(&rdma_stat_rq_prod);
75662 + atomic_inc_unchecked(&rdma_stat_rq_prod);
75663
75664 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
75665 /*
75666 @@ -394,7 +394,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
75667 return;
75668
75669 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
75670 - atomic_inc(&rdma_stat_sq_poll);
75671 + atomic_inc_unchecked(&rdma_stat_sq_poll);
75672 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
75673 if (wc.status != IB_WC_SUCCESS)
75674 /* Close the transport */
75675 @@ -412,7 +412,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
75676 }
75677
75678 if (ctxt)
75679 - atomic_inc(&rdma_stat_sq_prod);
75680 + atomic_inc_unchecked(&rdma_stat_sq_prod);
75681 }
75682
75683 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
75684 @@ -1274,7 +1274,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
75685 spin_lock_bh(&xprt->sc_lock);
75686 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
75687 spin_unlock_bh(&xprt->sc_lock);
75688 - atomic_inc(&rdma_stat_sq_starve);
75689 + atomic_inc_unchecked(&rdma_stat_sq_starve);
75690
75691 /* See if we can opportunistically reap SQ WR to make room */
75692 sq_cq_reap(xprt);
75693 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
75694 index e758139..d29ea47 100644
75695 --- a/net/sysctl_net.c
75696 +++ b/net/sysctl_net.c
75697 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
75698 struct ctl_table *table)
75699 {
75700 /* Allow network administrator to have same access as root. */
75701 - if (capable(CAP_NET_ADMIN)) {
75702 + if (capable_nolog(CAP_NET_ADMIN)) {
75703 int mode = (table->mode >> 6) & 7;
75704 return (mode << 6) | (mode << 3) | mode;
75705 }
75706 diff --git a/net/tipc/link.c b/net/tipc/link.c
75707 index ae98a72..7bb6056 100644
75708 --- a/net/tipc/link.c
75709 +++ b/net/tipc/link.c
75710 @@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
75711 struct tipc_msg fragm_hdr;
75712 struct sk_buff *buf, *buf_chain, *prev;
75713 u32 fragm_crs, fragm_rest, hsz, sect_rest;
75714 - const unchar *sect_crs;
75715 + const unchar __user *sect_crs;
75716 int curr_sect;
75717 u32 fragm_no;
75718
75719 @@ -1247,7 +1247,7 @@ again:
75720
75721 if (!sect_rest) {
75722 sect_rest = msg_sect[++curr_sect].iov_len;
75723 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
75724 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
75725 }
75726
75727 if (sect_rest < fragm_rest)
75728 @@ -1266,7 +1266,7 @@ error:
75729 }
75730 } else
75731 skb_copy_to_linear_data_offset(buf, fragm_crs,
75732 - sect_crs, sz);
75733 + (const void __force_kernel *)sect_crs, sz);
75734 sect_crs += sz;
75735 sect_rest -= sz;
75736 fragm_crs += sz;
75737 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
75738 index 83d5096..dcba497 100644
75739 --- a/net/tipc/msg.c
75740 +++ b/net/tipc/msg.c
75741 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
75742 msg_sect[cnt].iov_len);
75743 else
75744 skb_copy_to_linear_data_offset(*buf, pos,
75745 - msg_sect[cnt].iov_base,
75746 + (const void __force_kernel *)msg_sect[cnt].iov_base,
75747 msg_sect[cnt].iov_len);
75748 pos += msg_sect[cnt].iov_len;
75749 }
75750 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
75751 index 1983717..4d6102c 100644
75752 --- a/net/tipc/subscr.c
75753 +++ b/net/tipc/subscr.c
75754 @@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub,
75755 {
75756 struct iovec msg_sect;
75757
75758 - msg_sect.iov_base = (void *)&sub->evt;
75759 + msg_sect.iov_base = (void __force_user *)&sub->evt;
75760 msg_sect.iov_len = sizeof(struct tipc_event);
75761
75762 sub->evt.event = htohl(event, sub->swap);
75763 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
75764 index d99678a..3514a21 100644
75765 --- a/net/unix/af_unix.c
75766 +++ b/net/unix/af_unix.c
75767 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(struct net *net,
75768 err = -ECONNREFUSED;
75769 if (!S_ISSOCK(inode->i_mode))
75770 goto put_fail;
75771 +
75772 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
75773 + err = -EACCES;
75774 + goto put_fail;
75775 + }
75776 +
75777 u = unix_find_socket_byinode(inode);
75778 if (!u)
75779 goto put_fail;
75780 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(struct net *net,
75781 if (u) {
75782 struct dentry *dentry;
75783 dentry = unix_sk(u)->dentry;
75784 +
75785 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
75786 + err = -EPERM;
75787 + sock_put(u);
75788 + goto fail;
75789 + }
75790 +
75791 if (dentry)
75792 touch_atime(unix_sk(u)->mnt, dentry);
75793 } else
75794 @@ -869,11 +882,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
75795 err = security_path_mknod(&path, dentry, mode, 0);
75796 if (err)
75797 goto out_mknod_drop_write;
75798 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
75799 + err = -EACCES;
75800 + goto out_mknod_drop_write;
75801 + }
75802 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
75803 out_mknod_drop_write:
75804 mnt_drop_write(path.mnt);
75805 if (err)
75806 goto out_mknod_dput;
75807 +
75808 + gr_handle_create(dentry, path.mnt);
75809 +
75810 mutex_unlock(&path.dentry->d_inode->i_mutex);
75811 dput(path.dentry);
75812 path.dentry = dentry;
75813 diff --git a/net/wireless/core.h b/net/wireless/core.h
75814 index b9ec306..b4a563e 100644
75815 --- a/net/wireless/core.h
75816 +++ b/net/wireless/core.h
75817 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
75818 struct mutex mtx;
75819
75820 /* rfkill support */
75821 - struct rfkill_ops rfkill_ops;
75822 + rfkill_ops_no_const rfkill_ops;
75823 struct rfkill *rfkill;
75824 struct work_struct rfkill_sync;
75825
75826 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
75827 index 0af7f54..c916d2f 100644
75828 --- a/net/wireless/wext-core.c
75829 +++ b/net/wireless/wext-core.c
75830 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
75831 */
75832
75833 /* Support for very large requests */
75834 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
75835 - (user_length > descr->max_tokens)) {
75836 + if (user_length > descr->max_tokens) {
75837 /* Allow userspace to GET more than max so
75838 * we can support any size GET requests.
75839 * There is still a limit : -ENOMEM.
75840 @@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
75841 }
75842 }
75843
75844 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
75845 - /*
75846 - * If this is a GET, but not NOMAX, it means that the extra
75847 - * data is not bounded by userspace, but by max_tokens. Thus
75848 - * set the length to max_tokens. This matches the extra data
75849 - * allocation.
75850 - * The driver should fill it with the number of tokens it
75851 - * provided, and it may check iwp->length rather than having
75852 - * knowledge of max_tokens. If the driver doesn't change the
75853 - * iwp->length, this ioctl just copies back max_token tokens
75854 - * filled with zeroes. Hopefully the driver isn't claiming
75855 - * them to be valid data.
75856 - */
75857 - iwp->length = descr->max_tokens;
75858 - }
75859 -
75860 err = handler(dev, info, (union iwreq_data *) iwp, extra);
75861
75862 iwp->length += essid_compat;
75863 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
75864 index 9049a5c..cfa6f5c 100644
75865 --- a/net/xfrm/xfrm_policy.c
75866 +++ b/net/xfrm/xfrm_policy.c
75867 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
75868 {
75869 policy->walk.dead = 1;
75870
75871 - atomic_inc(&policy->genid);
75872 + atomic_inc_unchecked(&policy->genid);
75873
75874 if (del_timer(&policy->timer))
75875 xfrm_pol_put(policy);
75876 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
75877 hlist_add_head(&policy->bydst, chain);
75878 xfrm_pol_hold(policy);
75879 net->xfrm.policy_count[dir]++;
75880 - atomic_inc(&flow_cache_genid);
75881 + atomic_inc_unchecked(&flow_cache_genid);
75882 if (delpol)
75883 __xfrm_policy_unlink(delpol, dir);
75884 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
75885 @@ -1530,7 +1530,7 @@ free_dst:
75886 goto out;
75887 }
75888
75889 -static int inline
75890 +static inline int
75891 xfrm_dst_alloc_copy(void **target, const void *src, int size)
75892 {
75893 if (!*target) {
75894 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
75895 return 0;
75896 }
75897
75898 -static int inline
75899 +static inline int
75900 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75901 {
75902 #ifdef CONFIG_XFRM_SUB_POLICY
75903 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75904 #endif
75905 }
75906
75907 -static int inline
75908 +static inline int
75909 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
75910 {
75911 #ifdef CONFIG_XFRM_SUB_POLICY
75912 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
75913
75914 xdst->num_pols = num_pols;
75915 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
75916 - xdst->policy_genid = atomic_read(&pols[0]->genid);
75917 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
75918
75919 return xdst;
75920 }
75921 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
75922 if (xdst->xfrm_genid != dst->xfrm->genid)
75923 return 0;
75924 if (xdst->num_pols > 0 &&
75925 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
75926 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
75927 return 0;
75928
75929 mtu = dst_mtu(dst->child);
75930 @@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
75931 sizeof(pol->xfrm_vec[i].saddr));
75932 pol->xfrm_vec[i].encap_family = mp->new_family;
75933 /* flush bundles */
75934 - atomic_inc(&pol->genid);
75935 + atomic_inc_unchecked(&pol->genid);
75936 }
75937 }
75938
75939 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
75940 index d2b366c..51ff91e 100644
75941 --- a/scripts/Makefile.build
75942 +++ b/scripts/Makefile.build
75943 @@ -109,7 +109,7 @@ endif
75944 endif
75945
75946 # Do not include host rules unless needed
75947 -ifneq ($(hostprogs-y)$(hostprogs-m),)
75948 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
75949 include scripts/Makefile.host
75950 endif
75951
75952 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
75953 index 686cb0d..9d653bf 100644
75954 --- a/scripts/Makefile.clean
75955 +++ b/scripts/Makefile.clean
75956 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
75957 __clean-files := $(extra-y) $(always) \
75958 $(targets) $(clean-files) \
75959 $(host-progs) \
75960 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
75961 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
75962 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
75963
75964 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
75965
75966 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
75967 index 1ac414f..a1c1451 100644
75968 --- a/scripts/Makefile.host
75969 +++ b/scripts/Makefile.host
75970 @@ -31,6 +31,7 @@
75971 # Note: Shared libraries consisting of C++ files are not supported
75972
75973 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
75974 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
75975
75976 # C code
75977 # Executables compiled from a single .c file
75978 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
75979 # Shared libaries (only .c supported)
75980 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
75981 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
75982 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
75983 # Remove .so files from "xxx-objs"
75984 host-cobjs := $(filter-out %.so,$(host-cobjs))
75985
75986 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
75987 index cb1f50c..cef2a7c 100644
75988 --- a/scripts/basic/fixdep.c
75989 +++ b/scripts/basic/fixdep.c
75990 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
75991 /*
75992 * Lookup a value in the configuration string.
75993 */
75994 -static int is_defined_config(const char *name, int len, unsigned int hash)
75995 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
75996 {
75997 struct item *aux;
75998
75999 @@ -211,10 +211,10 @@ static void clear_config(void)
76000 /*
76001 * Record the use of a CONFIG_* word.
76002 */
76003 -static void use_config(const char *m, int slen)
76004 +static void use_config(const char *m, unsigned int slen)
76005 {
76006 unsigned int hash = strhash(m, slen);
76007 - int c, i;
76008 + unsigned int c, i;
76009
76010 if (is_defined_config(m, slen, hash))
76011 return;
76012 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
76013
76014 static void parse_config_file(const char *map, size_t len)
76015 {
76016 - const int *end = (const int *) (map + len);
76017 + const unsigned int *end = (const unsigned int *) (map + len);
76018 /* start at +1, so that p can never be < map */
76019 - const int *m = (const int *) map + 1;
76020 + const unsigned int *m = (const unsigned int *) map + 1;
76021 const char *p, *q;
76022
76023 for (; m < end; m++) {
76024 @@ -406,7 +406,7 @@ static void print_deps(void)
76025 static void traps(void)
76026 {
76027 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
76028 - int *p = (int *)test;
76029 + unsigned int *p = (unsigned int *)test;
76030
76031 if (*p != INT_CONF) {
76032 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
76033 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
76034 new file mode 100644
76035 index 0000000..8729101
76036 --- /dev/null
76037 +++ b/scripts/gcc-plugin.sh
76038 @@ -0,0 +1,2 @@
76039 +#!/bin/sh
76040 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
76041 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
76042 index f936d1f..a66d95f 100644
76043 --- a/scripts/mod/file2alias.c
76044 +++ b/scripts/mod/file2alias.c
76045 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
76046 unsigned long size, unsigned long id_size,
76047 void *symval)
76048 {
76049 - int i;
76050 + unsigned int i;
76051
76052 if (size % id_size || size < id_size) {
76053 if (cross_build != 0)
76054 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
76055 /* USB is special because the bcdDevice can be matched against a numeric range */
76056 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
76057 static void do_usb_entry(struct usb_device_id *id,
76058 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
76059 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
76060 unsigned char range_lo, unsigned char range_hi,
76061 unsigned char max, struct module *mod)
76062 {
76063 @@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
76064 {
76065 unsigned int devlo, devhi;
76066 unsigned char chi, clo, max;
76067 - int ndigits;
76068 + unsigned int ndigits;
76069
76070 id->match_flags = TO_NATIVE(id->match_flags);
76071 id->idVendor = TO_NATIVE(id->idVendor);
76072 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
76073 for (i = 0; i < count; i++) {
76074 const char *id = (char *)devs[i].id;
76075 char acpi_id[sizeof(devs[0].id)];
76076 - int j;
76077 + unsigned int j;
76078
76079 buf_printf(&mod->dev_table_buf,
76080 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
76081 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
76082
76083 for (j = 0; j < PNP_MAX_DEVICES; j++) {
76084 const char *id = (char *)card->devs[j].id;
76085 - int i2, j2;
76086 + unsigned int i2, j2;
76087 int dup = 0;
76088
76089 if (!id[0])
76090 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
76091 /* add an individual alias for every device entry */
76092 if (!dup) {
76093 char acpi_id[sizeof(card->devs[0].id)];
76094 - int k;
76095 + unsigned int k;
76096
76097 buf_printf(&mod->dev_table_buf,
76098 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
76099 @@ -807,7 +807,7 @@ static void dmi_ascii_filter(char *d, const char *s)
76100 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
76101 char *alias)
76102 {
76103 - int i, j;
76104 + unsigned int i, j;
76105
76106 sprintf(alias, "dmi*");
76107
76108 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
76109 index 2bd594e..d43245e 100644
76110 --- a/scripts/mod/modpost.c
76111 +++ b/scripts/mod/modpost.c
76112 @@ -919,6 +919,7 @@ enum mismatch {
76113 ANY_INIT_TO_ANY_EXIT,
76114 ANY_EXIT_TO_ANY_INIT,
76115 EXPORT_TO_INIT_EXIT,
76116 + DATA_TO_TEXT
76117 };
76118
76119 struct sectioncheck {
76120 @@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = {
76121 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
76122 .mismatch = EXPORT_TO_INIT_EXIT,
76123 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
76124 +},
76125 +/* Do not reference code from writable data */
76126 +{
76127 + .fromsec = { DATA_SECTIONS, NULL },
76128 + .tosec = { TEXT_SECTIONS, NULL },
76129 + .mismatch = DATA_TO_TEXT
76130 }
76131 };
76132
76133 @@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
76134 continue;
76135 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
76136 continue;
76137 - if (sym->st_value == addr)
76138 - return sym;
76139 /* Find a symbol nearby - addr are maybe negative */
76140 d = sym->st_value - addr;
76141 + if (d == 0)
76142 + return sym;
76143 if (d < 0)
76144 d = addr - sym->st_value;
76145 if (d < distance) {
76146 @@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname,
76147 tosym, prl_to, prl_to, tosym);
76148 free(prl_to);
76149 break;
76150 + case DATA_TO_TEXT:
76151 +/*
76152 + fprintf(stderr,
76153 + "The variable %s references\n"
76154 + "the %s %s%s%s\n",
76155 + fromsym, to, sec2annotation(tosec), tosym, to_p);
76156 +*/
76157 + break;
76158 }
76159 fprintf(stderr, "\n");
76160 }
76161 @@ -1656,7 +1671,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
76162 static void check_sec_ref(struct module *mod, const char *modname,
76163 struct elf_info *elf)
76164 {
76165 - int i;
76166 + unsigned int i;
76167 Elf_Shdr *sechdrs = elf->sechdrs;
76168
76169 /* Walk through all sections */
76170 @@ -1754,7 +1769,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
76171 va_end(ap);
76172 }
76173
76174 -void buf_write(struct buffer *buf, const char *s, int len)
76175 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
76176 {
76177 if (buf->size - buf->pos < len) {
76178 buf->size += len + SZ;
76179 @@ -1972,7 +1987,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
76180 if (fstat(fileno(file), &st) < 0)
76181 goto close_write;
76182
76183 - if (st.st_size != b->pos)
76184 + if (st.st_size != (off_t)b->pos)
76185 goto close_write;
76186
76187 tmp = NOFAIL(malloc(b->pos));
76188 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
76189 index 2031119..b5433af 100644
76190 --- a/scripts/mod/modpost.h
76191 +++ b/scripts/mod/modpost.h
76192 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
76193
76194 struct buffer {
76195 char *p;
76196 - int pos;
76197 - int size;
76198 + unsigned int pos;
76199 + unsigned int size;
76200 };
76201
76202 void __attribute__((format(printf, 2, 3)))
76203 buf_printf(struct buffer *buf, const char *fmt, ...);
76204
76205 void
76206 -buf_write(struct buffer *buf, const char *s, int len);
76207 +buf_write(struct buffer *buf, const char *s, unsigned int len);
76208
76209 struct module {
76210 struct module *next;
76211 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
76212 index 9dfcd6d..099068e 100644
76213 --- a/scripts/mod/sumversion.c
76214 +++ b/scripts/mod/sumversion.c
76215 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
76216 goto out;
76217 }
76218
76219 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
76220 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
76221 warn("writing sum in %s failed: %s\n",
76222 filename, strerror(errno));
76223 goto out;
76224 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
76225 index 5c11312..72742b5 100644
76226 --- a/scripts/pnmtologo.c
76227 +++ b/scripts/pnmtologo.c
76228 @@ -237,14 +237,14 @@ static void write_header(void)
76229 fprintf(out, " * Linux logo %s\n", logoname);
76230 fputs(" */\n\n", out);
76231 fputs("#include <linux/linux_logo.h>\n\n", out);
76232 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
76233 + fprintf(out, "static unsigned char %s_data[] = {\n",
76234 logoname);
76235 }
76236
76237 static void write_footer(void)
76238 {
76239 fputs("\n};\n\n", out);
76240 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
76241 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
76242 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
76243 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
76244 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
76245 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
76246 fputs("\n};\n\n", out);
76247
76248 /* write logo clut */
76249 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
76250 + fprintf(out, "static unsigned char %s_clut[] = {\n",
76251 logoname);
76252 write_hex_cnt = 0;
76253 for (i = 0; i < logo_clutsize; i++) {
76254 diff --git a/security/Kconfig b/security/Kconfig
76255 index 51bd5a0..eeabc9f 100644
76256 --- a/security/Kconfig
76257 +++ b/security/Kconfig
76258 @@ -4,6 +4,627 @@
76259
76260 menu "Security options"
76261
76262 +source grsecurity/Kconfig
76263 +
76264 +menu "PaX"
76265 +
76266 + config ARCH_TRACK_EXEC_LIMIT
76267 + bool
76268 +
76269 + config PAX_KERNEXEC_PLUGIN
76270 + bool
76271 +
76272 + config PAX_PER_CPU_PGD
76273 + bool
76274 +
76275 + config TASK_SIZE_MAX_SHIFT
76276 + int
76277 + depends on X86_64
76278 + default 47 if !PAX_PER_CPU_PGD
76279 + default 42 if PAX_PER_CPU_PGD
76280 +
76281 + config PAX_ENABLE_PAE
76282 + bool
76283 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
76284 +
76285 +config PAX
76286 + bool "Enable various PaX features"
76287 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
76288 + help
76289 + This allows you to enable various PaX features. PaX adds
76290 + intrusion prevention mechanisms to the kernel that reduce
76291 + the risks posed by exploitable memory corruption bugs.
76292 +
76293 +menu "PaX Control"
76294 + depends on PAX
76295 +
76296 +config PAX_SOFTMODE
76297 + bool 'Support soft mode'
76298 + help
76299 + Enabling this option will allow you to run PaX in soft mode, that
76300 + is, PaX features will not be enforced by default, only on executables
76301 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
76302 + support as they are the only way to mark executables for soft mode use.
76303 +
76304 + Soft mode can be activated by using the "pax_softmode=1" kernel command
76305 + line option on boot. Furthermore you can control various PaX features
76306 + at runtime via the entries in /proc/sys/kernel/pax.
76307 +
76308 +config PAX_EI_PAX
76309 + bool 'Use legacy ELF header marking'
76310 + help
76311 + Enabling this option will allow you to control PaX features on
76312 + a per executable basis via the 'chpax' utility available at
76313 + http://pax.grsecurity.net/. The control flags will be read from
76314 + an otherwise reserved part of the ELF header. This marking has
76315 + numerous drawbacks (no support for soft-mode, toolchain does not
76316 + know about the non-standard use of the ELF header) therefore it
76317 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
76318 + support.
76319 +
76320 + If you have applications not marked by the PT_PAX_FLAGS ELF program
76321 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
76322 + option otherwise they will not get any protection.
76323 +
76324 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
76325 + support as well, they will override the legacy EI_PAX marks.
76326 +
76327 +config PAX_PT_PAX_FLAGS
76328 + bool 'Use ELF program header marking'
76329 + help
76330 + Enabling this option will allow you to control PaX features on
76331 + a per executable basis via the 'paxctl' utility available at
76332 + http://pax.grsecurity.net/. The control flags will be read from
76333 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
76334 + has the benefits of supporting both soft mode and being fully
76335 + integrated into the toolchain (the binutils patch is available
76336 + from http://pax.grsecurity.net).
76337 +
76338 + If you have applications not marked by the PT_PAX_FLAGS ELF program
76339 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
76340 + support otherwise they will not get any protection.
76341 +
76342 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
76343 + must make sure that the marks are the same if a binary has both marks.
76344 +
76345 + Note that if you enable the legacy EI_PAX marking support as well,
76346 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
76347 +
76348 +config PAX_XATTR_PAX_FLAGS
76349 + bool 'Use filesystem extended attributes marking'
76350 + depends on EXPERT
76351 + select CIFS_XATTR if CIFS
76352 + select EXT2_FS_XATTR if EXT2_FS
76353 + select EXT3_FS_XATTR if EXT3_FS
76354 + select EXT4_FS_XATTR if EXT4_FS
76355 + select JFFS2_FS_XATTR if JFFS2_FS
76356 + select REISERFS_FS_XATTR if REISERFS_FS
76357 + select SQUASHFS_XATTR if SQUASHFS
76358 + select TMPFS_XATTR if TMPFS
76359 + select UBIFS_FS_XATTR if UBIFS_FS
76360 + help
76361 + Enabling this option will allow you to control PaX features on
76362 + a per executable basis via the 'setfattr' utility. The control
76363 + flags will be read from the user.pax.flags extended attribute of
76364 + the file. This marking has the benefit of supporting binary-only
76365 + applications that self-check themselves (e.g., skype) and would
76366 + not tolerate chpax/paxctl changes. The main drawback is that
76367 + extended attributes are not supported by some filesystems (e.g.,
76368 + isofs, udf, vfat) so copying files through such filesystems will
76369 + lose the extended attributes and these PaX markings.
76370 +
76371 + If you have applications not marked by the PT_PAX_FLAGS ELF program
76372 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
76373 + support otherwise they will not get any protection.
76374 +
76375 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
76376 + must make sure that the marks are the same if a binary has both marks.
76377 +
76378 + Note that if you enable the legacy EI_PAX marking support as well,
76379 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
76380 +
76381 +choice
76382 + prompt 'MAC system integration'
76383 + default PAX_HAVE_ACL_FLAGS
76384 + help
76385 + Mandatory Access Control systems have the option of controlling
76386 + PaX flags on a per executable basis, choose the method supported
76387 + by your particular system.
76388 +
76389 + - "none": if your MAC system does not interact with PaX,
76390 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
76391 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
76392 +
76393 + NOTE: this option is for developers/integrators only.
76394 +
76395 + config PAX_NO_ACL_FLAGS
76396 + bool 'none'
76397 +
76398 + config PAX_HAVE_ACL_FLAGS
76399 + bool 'direct'
76400 +
76401 + config PAX_HOOK_ACL_FLAGS
76402 + bool 'hook'
76403 +endchoice
76404 +
76405 +endmenu
76406 +
76407 +menu "Non-executable pages"
76408 + depends on PAX
76409 +
76410 +config PAX_NOEXEC
76411 + bool "Enforce non-executable pages"
76412 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
76413 + help
76414 + By design some architectures do not allow for protecting memory
76415 + pages against execution or even if they do, Linux does not make
76416 + use of this feature. In practice this means that if a page is
76417 + readable (such as the stack or heap) it is also executable.
76418 +
76419 + There is a well known exploit technique that makes use of this
76420 + fact and a common programming mistake where an attacker can
76421 + introduce code of his choice somewhere in the attacked program's
76422 + memory (typically the stack or the heap) and then execute it.
76423 +
76424 + If the attacked program was running with different (typically
76425 + higher) privileges than that of the attacker, then he can elevate
76426 + his own privilege level (e.g. get a root shell, write to files for
76427 + which he does not have write access to, etc).
76428 +
76429 + Enabling this option will let you choose from various features
76430 + that prevent the injection and execution of 'foreign' code in
76431 + a program.
76432 +
76433 + This will also break programs that rely on the old behaviour and
76434 + expect that dynamically allocated memory via the malloc() family
76435 + of functions is executable (which it is not). Notable examples
76436 + are the XFree86 4.x server, the java runtime and wine.
76437 +
76438 +config PAX_PAGEEXEC
76439 + bool "Paging based non-executable pages"
76440 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
76441 + select S390_SWITCH_AMODE if S390
76442 + select S390_EXEC_PROTECT if S390
76443 + select ARCH_TRACK_EXEC_LIMIT if X86_32
76444 + help
76445 + This implementation is based on the paging feature of the CPU.
76446 + On i386 without hardware non-executable bit support there is a
76447 + variable but usually low performance impact, however on Intel's
76448 + P4 core based CPUs it is very high so you should not enable this
76449 + for kernels meant to be used on such CPUs.
76450 +
76451 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
76452 + with hardware non-executable bit support there is no performance
76453 + impact, on ppc the impact is negligible.
76454 +
76455 + Note that several architectures require various emulations due to
76456 + badly designed userland ABIs, this will cause a performance impact
76457 + but will disappear as soon as userland is fixed. For example, ppc
76458 + userland MUST have been built with secure-plt by a recent toolchain.
76459 +
76460 +config PAX_SEGMEXEC
76461 + bool "Segmentation based non-executable pages"
76462 + depends on PAX_NOEXEC && X86_32
76463 + help
76464 + This implementation is based on the segmentation feature of the
76465 + CPU and has a very small performance impact, however applications
76466 + will be limited to a 1.5 GB address space instead of the normal
76467 + 3 GB.
76468 +
76469 +config PAX_EMUTRAMP
76470 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
76471 + default y if PARISC
76472 + help
76473 + There are some programs and libraries that for one reason or
76474 + another attempt to execute special small code snippets from
76475 + non-executable memory pages. Most notable examples are the
76476 + signal handler return code generated by the kernel itself and
76477 + the GCC trampolines.
76478 +
76479 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
76480 + such programs will no longer work under your kernel.
76481 +
76482 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
76483 + utilities to enable trampoline emulation for the affected programs
76484 + yet still have the protection provided by the non-executable pages.
76485 +
76486 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
76487 + your system will not even boot.
76488 +
76489 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
76490 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
76491 + for the affected files.
76492 +
76493 + NOTE: enabling this feature *may* open up a loophole in the
76494 + protection provided by non-executable pages that an attacker
76495 + could abuse. Therefore the best solution is to not have any
76496 + files on your system that would require this option. This can
76497 + be achieved by not using libc5 (which relies on the kernel
76498 + signal handler return code) and not using or rewriting programs
76499 + that make use of the nested function implementation of GCC.
76500 + Skilled users can just fix GCC itself so that it implements
76501 + nested function calls in a way that does not interfere with PaX.
76502 +
76503 +config PAX_EMUSIGRT
76504 + bool "Automatically emulate sigreturn trampolines"
76505 + depends on PAX_EMUTRAMP && PARISC
76506 + default y
76507 + help
76508 + Enabling this option will have the kernel automatically detect
76509 + and emulate signal return trampolines executing on the stack
76510 + that would otherwise lead to task termination.
76511 +
76512 + This solution is intended as a temporary one for users with
76513 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
76514 + Modula-3 runtime, etc) or executables linked to such, basically
76515 + everything that does not specify its own SA_RESTORER function in
76516 + normal executable memory like glibc 2.1+ does.
76517 +
76518 + On parisc you MUST enable this option, otherwise your system will
76519 + not even boot.
76520 +
76521 + NOTE: this feature cannot be disabled on a per executable basis
76522 + and since it *does* open up a loophole in the protection provided
76523 + by non-executable pages, the best solution is to not have any
76524 + files on your system that would require this option.
76525 +
76526 +config PAX_MPROTECT
76527 + bool "Restrict mprotect()"
76528 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
76529 + help
76530 + Enabling this option will prevent programs from
76531 + - changing the executable status of memory pages that were
76532 + not originally created as executable,
76533 + - making read-only executable pages writable again,
76534 + - creating executable pages from anonymous memory,
76535 + - making read-only-after-relocations (RELRO) data pages writable again.
76536 +
76537 + You should say Y here to complete the protection provided by
76538 + the enforcement of non-executable pages.
76539 +
76540 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76541 + this feature on a per file basis.
76542 +
76543 +config PAX_MPROTECT_COMPAT
76544 + bool "Use legacy/compat protection demoting (read help)"
76545 + depends on PAX_MPROTECT
76546 + default n
76547 + help
76548 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
76549 + by sending the proper error code to the application. For some broken
76550 + userland, this can cause problems with Python or other applications. The
76551 + current implementation however allows for applications like clamav to
76552 + detect if JIT compilation/execution is allowed and to fall back gracefully
76553 + to an interpreter-based mode if it does not. While we encourage everyone
76554 + to use the current implementation as-is and push upstream to fix broken
76555 + userland (note that the RWX logging option can assist with this), in some
76556 + environments this may not be possible. Having to disable MPROTECT
76557 + completely on certain binaries reduces the security benefit of PaX,
76558 + so this option is provided for those environments to revert to the old
76559 + behavior.
76560 +
76561 +config PAX_ELFRELOCS
76562 + bool "Allow ELF text relocations (read help)"
76563 + depends on PAX_MPROTECT
76564 + default n
76565 + help
76566 + Non-executable pages and mprotect() restrictions are effective
76567 + in preventing the introduction of new executable code into an
76568 + attacked task's address space. There remain only two venues
76569 + for this kind of attack: if the attacker can execute already
76570 + existing code in the attacked task then he can either have it
76571 + create and mmap() a file containing his code or have it mmap()
76572 + an already existing ELF library that does not have position
76573 + independent code in it and use mprotect() on it to make it
76574 + writable and copy his code there. While protecting against
76575 + the former approach is beyond PaX, the latter can be prevented
76576 + by having only PIC ELF libraries on one's system (which do not
76577 + need to relocate their code). If you are sure this is your case,
76578 + as is the case with all modern Linux distributions, then leave
76579 + this option disabled. You should say 'n' here.
76580 +
76581 +config PAX_ETEXECRELOCS
76582 + bool "Allow ELF ET_EXEC text relocations"
76583 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
76584 + select PAX_ELFRELOCS
76585 + default y
76586 + help
76587 + On some architectures there are incorrectly created applications
76588 + that require text relocations and would not work without enabling
76589 + this option. If you are an alpha, ia64 or parisc user, you should
76590 + enable this option and disable it once you have made sure that
76591 + none of your applications need it.
76592 +
76593 +config PAX_EMUPLT
76594 + bool "Automatically emulate ELF PLT"
76595 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
76596 + default y
76597 + help
76598 + Enabling this option will have the kernel automatically detect
76599 + and emulate the Procedure Linkage Table entries in ELF files.
76600 + On some architectures such entries are in writable memory, and
76601 + become non-executable leading to task termination. Therefore
76602 + it is mandatory that you enable this option on alpha, parisc,
76603 + sparc and sparc64, otherwise your system would not even boot.
76604 +
76605 + NOTE: this feature *does* open up a loophole in the protection
76606 + provided by the non-executable pages, therefore the proper
76607 + solution is to modify the toolchain to produce a PLT that does
76608 + not need to be writable.
76609 +
76610 +config PAX_DLRESOLVE
76611 + bool 'Emulate old glibc resolver stub'
76612 + depends on PAX_EMUPLT && SPARC
76613 + default n
76614 + help
76615 + This option is needed if userland has an old glibc (before 2.4)
76616 + that puts a 'save' instruction into the runtime generated resolver
76617 + stub that needs special emulation.
76618 +
76619 +config PAX_KERNEXEC
76620 + bool "Enforce non-executable kernel pages"
76621 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
76622 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
76623 + select PAX_KERNEXEC_PLUGIN if X86_64
76624 + help
76625 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
76626 + that is, enabling this option will make it harder to inject
76627 + and execute 'foreign' code in kernel memory itself.
76628 +
76629 + Note that on x86_64 kernels there is a known regression when
76630 + this feature and KVM/VMX are both enabled in the host kernel.
76631 +
76632 +choice
76633 + prompt "Return Address Instrumentation Method"
76634 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
76635 + depends on PAX_KERNEXEC_PLUGIN
76636 + help
76637 + Select the method used to instrument function pointer dereferences.
76638 + Note that binary modules cannot be instrumented by this approach.
76639 +
76640 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
76641 + bool "bts"
76642 + help
76643 + This method is compatible with binary only modules but has
76644 + a higher runtime overhead.
76645 +
76646 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
76647 + bool "or"
76648 + depends on !PARAVIRT
76649 + help
76650 + This method is incompatible with binary only modules but has
76651 + a lower runtime overhead.
76652 +endchoice
76653 +
76654 +config PAX_KERNEXEC_PLUGIN_METHOD
76655 + string
76656 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
76657 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
76658 + default ""
76659 +
76660 +config PAX_KERNEXEC_MODULE_TEXT
76661 + int "Minimum amount of memory reserved for module code"
76662 + default "4"
76663 + depends on PAX_KERNEXEC && X86_32 && MODULES
76664 + help
76665 + Due to implementation details the kernel must reserve a fixed
76666 + amount of memory for module code at compile time that cannot be
76667 + changed at runtime. Here you can specify the minimum amount
76668 + in MB that will be reserved. Due to the same implementation
76669 + details this size will always be rounded up to the next 2/4 MB
76670 + boundary (depends on PAE) so the actually available memory for
76671 + module code will usually be more than this minimum.
76672 +
76673 + The default 4 MB should be enough for most users but if you have
76674 + an excessive number of modules (e.g., most distribution configs
76675 + compile many drivers as modules) or use huge modules such as
76676 + nvidia's kernel driver, you will need to adjust this amount.
76677 + A good rule of thumb is to look at your currently loaded kernel
76678 + modules and add up their sizes.
76679 +
76680 +endmenu
76681 +
76682 +menu "Address Space Layout Randomization"
76683 + depends on PAX
76684 +
76685 +config PAX_ASLR
76686 + bool "Address Space Layout Randomization"
76687 + help
76688 + Many if not most exploit techniques rely on the knowledge of
76689 + certain addresses in the attacked program. The following options
76690 + will allow the kernel to apply a certain amount of randomization
76691 + to specific parts of the program thereby forcing an attacker to
76692 + guess them in most cases. Any failed guess will most likely crash
76693 + the attacked program which allows the kernel to detect such attempts
76694 + and react on them. PaX itself provides no reaction mechanisms,
76695 + instead it is strongly encouraged that you make use of Nergal's
76696 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
76697 + (http://www.grsecurity.net/) built-in crash detection features or
76698 + develop one yourself.
76699 +
76700 + By saying Y here you can choose to randomize the following areas:
76701 + - top of the task's kernel stack
76702 + - top of the task's userland stack
76703 + - base address for mmap() requests that do not specify one
76704 + (this includes all libraries)
76705 + - base address of the main executable
76706 +
76707 + It is strongly recommended to say Y here as address space layout
76708 + randomization has negligible impact on performance yet it provides
76709 + a very effective protection.
76710 +
76711 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76712 + this feature on a per file basis.
76713 +
76714 +config PAX_RANDKSTACK
76715 + bool "Randomize kernel stack base"
76716 + depends on X86_TSC && X86
76717 + help
76718 + By saying Y here the kernel will randomize every task's kernel
76719 + stack on every system call. This will not only force an attacker
76720 + to guess it but also prevent him from making use of possible
76721 + leaked information about it.
76722 +
76723 + Since the kernel stack is a rather scarce resource, randomization
76724 + may cause unexpected stack overflows, therefore you should very
76725 + carefully test your system. Note that once enabled in the kernel
76726 + configuration, this feature cannot be disabled on a per file basis.
76727 +
76728 +config PAX_RANDUSTACK
76729 + bool "Randomize user stack base"
76730 + depends on PAX_ASLR
76731 + help
76732 + By saying Y here the kernel will randomize every task's userland
76733 + stack. The randomization is done in two steps where the second
76734 + one may apply a big amount of shift to the top of the stack and
76735 + cause problems for programs that want to use lots of memory (more
76736 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
76737 + For this reason the second step can be controlled by 'chpax' or
76738 + 'paxctl' on a per file basis.
76739 +
76740 +config PAX_RANDMMAP
76741 + bool "Randomize mmap() base"
76742 + depends on PAX_ASLR
76743 + help
76744 + By saying Y here the kernel will use a randomized base address for
76745 + mmap() requests that do not specify one themselves. As a result
76746 + all dynamically loaded libraries will appear at random addresses
76747 + and therefore be harder to exploit by a technique where an attacker
76748 + attempts to execute library code for his purposes (e.g. spawn a
76749 + shell from an exploited program that is running at an elevated
76750 + privilege level).
76751 +
76752 + Furthermore, if a program is relinked as a dynamic ELF file, its
76753 + base address will be randomized as well, completing the full
76754 + randomization of the address space layout. Attacking such programs
76755 + becomes a guess game. You can find an example of doing this at
76756 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
76757 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
76758 +
76759 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
76760 + feature on a per file basis.
76761 +
76762 +endmenu
76763 +
76764 +menu "Miscellaneous hardening features"
76765 +
76766 +config PAX_MEMORY_SANITIZE
76767 + bool "Sanitize all freed memory"
76768 + depends on !HIBERNATION
76769 + help
76770 + By saying Y here the kernel will erase memory pages as soon as they
76771 + are freed. This in turn reduces the lifetime of data stored in the
76772 + pages, making it less likely that sensitive information such as
76773 + passwords, cryptographic secrets, etc stay in memory for too long.
76774 +
76775 + This is especially useful for programs whose runtime is short, long
76776 + lived processes and the kernel itself benefit from this as long as
76777 + they operate on whole memory pages and ensure timely freeing of pages
76778 + that may hold sensitive information.
76779 +
76780 + The tradeoff is performance impact, on a single CPU system kernel
76781 + compilation sees a 3% slowdown, other systems and workloads may vary
76782 + and you are advised to test this feature on your expected workload
76783 + before deploying it.
76784 +
76785 + Note that this feature does not protect data stored in live pages,
76786 + e.g., process memory swapped to disk may stay there for a long time.
76787 +
76788 +config PAX_MEMORY_STACKLEAK
76789 + bool "Sanitize kernel stack"
76790 + depends on X86
76791 + help
76792 + By saying Y here the kernel will erase the kernel stack before it
76793 + returns from a system call. This in turn reduces the information
76794 + that a kernel stack leak bug can reveal.
76795 +
76796 + Note that such a bug can still leak information that was put on
76797 + the stack by the current system call (the one eventually triggering
76798 + the bug) but traces of earlier system calls on the kernel stack
76799 + cannot leak anymore.
76800 +
76801 + The tradeoff is performance impact: on a single CPU system kernel
76802 + compilation sees a 1% slowdown, other systems and workloads may vary
76803 + and you are advised to test this feature on your expected workload
76804 + before deploying it.
76805 +
76806 + Note: full support for this feature requires gcc with plugin support
76807 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
76808 + versions means that functions with large enough stack frames may
76809 + leave uninitialized memory behind that may be exposed to a later
76810 + syscall leaking the stack.
76811 +
76812 +config PAX_MEMORY_UDEREF
76813 + bool "Prevent invalid userland pointer dereference"
76814 + depends on X86 && !UML_X86 && !XEN
76815 + select PAX_PER_CPU_PGD if X86_64
76816 + help
76817 + By saying Y here the kernel will be prevented from dereferencing
76818 + userland pointers in contexts where the kernel expects only kernel
76819 + pointers. This is both a useful runtime debugging feature and a
76820 + security measure that prevents exploiting a class of kernel bugs.
76821 +
76822 + The tradeoff is that some virtualization solutions may experience
76823 + a huge slowdown and therefore you should not enable this feature
76824 + for kernels meant to run in such environments. Whether a given VM
76825 + solution is affected or not is best determined by simply trying it
76826 + out, the performance impact will be obvious right on boot as this
76827 + mechanism engages from very early on. A good rule of thumb is that
76828 + VMs running on CPUs without hardware virtualization support (i.e.,
76829 + the majority of IA-32 CPUs) will likely experience the slowdown.
76830 +
76831 +config PAX_REFCOUNT
76832 + bool "Prevent various kernel object reference counter overflows"
76833 + depends on GRKERNSEC && (X86 || SPARC64)
76834 + help
76835 + By saying Y here the kernel will detect and prevent overflowing
76836 + various (but not all) kinds of object reference counters. Such
76837 + overflows can normally occur due to bugs only and are often, if
76838 + not always, exploitable.
76839 +
76840 + The tradeoff is that data structures protected by an overflowed
76841 + refcount will never be freed and therefore will leak memory. Note
76842 + that this leak also happens even without this protection but in
76843 + that case the overflow can eventually trigger the freeing of the
76844 + data structure while it is still being used elsewhere, resulting
76845 + in the exploitable situation that this feature prevents.
76846 +
76847 + Since this has a negligible performance impact, you should enable
76848 + this feature.
76849 +
76850 +config PAX_USERCOPY
76851 + bool "Harden heap object copies between kernel and userland"
76852 + depends on X86 || PPC || SPARC || ARM
76853 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
76854 + help
76855 + By saying Y here the kernel will enforce the size of heap objects
76856 + when they are copied in either direction between the kernel and
76857 + userland, even if only a part of the heap object is copied.
76858 +
76859 + Specifically, this checking prevents information leaking from the
76860 + kernel heap during kernel to userland copies (if the kernel heap
76861 + object is otherwise fully initialized) and prevents kernel heap
76862 + overflows during userland to kernel copies.
76863 +
76864 + Note that the current implementation provides the strictest bounds
76865 + checks for the SLUB allocator.
76866 +
76867 + Enabling this option also enables per-slab cache protection against
76868 + data in a given cache being copied into/out of via userland
76869 + accessors. Though the whitelist of regions will be reduced over
76870 + time, it notably protects important data structures like task structs.
76871 +
76872 + If frame pointers are enabled on x86, this option will also restrict
76873 + copies into and out of the kernel stack to local variables within a
76874 + single frame.
76875 +
76876 + Since this has a negligible performance impact, you should enable
76877 + this feature.
76878 +
76879 +endmenu
76880 +
76881 +endmenu
76882 +
76883 config KEYS
76884 bool "Enable access key retention support"
76885 help
76886 @@ -169,7 +790,7 @@ config INTEL_TXT
76887 config LSM_MMAP_MIN_ADDR
76888 int "Low address space for LSM to protect from user allocation"
76889 depends on SECURITY && SECURITY_SELINUX
76890 - default 32768 if ARM
76891 + default 32768 if ALPHA || ARM || PARISC || SPARC32
76892 default 65536
76893 help
76894 This is the portion of low virtual memory which should be protected
76895 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
76896 index 3783202..1852837 100644
76897 --- a/security/apparmor/lsm.c
76898 +++ b/security/apparmor/lsm.c
76899 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
76900 return error;
76901 }
76902
76903 -static struct security_operations apparmor_ops = {
76904 +static struct security_operations apparmor_ops __read_only = {
76905 .name = "apparmor",
76906
76907 .ptrace_access_check = apparmor_ptrace_access_check,
76908 diff --git a/security/commoncap.c b/security/commoncap.c
76909 index ee4f848..a320c64 100644
76910 --- a/security/commoncap.c
76911 +++ b/security/commoncap.c
76912 @@ -28,6 +28,7 @@
76913 #include <linux/prctl.h>
76914 #include <linux/securebits.h>
76915 #include <linux/user_namespace.h>
76916 +#include <net/sock.h>
76917
76918 /*
76919 * If a non-root user executes a setuid-root binary in
76920 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
76921
76922 int cap_netlink_recv(struct sk_buff *skb, int cap)
76923 {
76924 - if (!cap_raised(current_cap(), cap))
76925 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
76926 return -EPERM;
76927 return 0;
76928 }
76929 @@ -579,6 +580,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
76930 {
76931 const struct cred *cred = current_cred();
76932
76933 + if (gr_acl_enable_at_secure())
76934 + return 1;
76935 +
76936 if (cred->uid != 0) {
76937 if (bprm->cap_effective)
76938 return 1;
76939 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
76940 index 3ccf7ac..d73ad64 100644
76941 --- a/security/integrity/ima/ima.h
76942 +++ b/security/integrity/ima/ima.h
76943 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
76944 extern spinlock_t ima_queue_lock;
76945
76946 struct ima_h_table {
76947 - atomic_long_t len; /* number of stored measurements in the list */
76948 - atomic_long_t violations;
76949 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
76950 + atomic_long_unchecked_t violations;
76951 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
76952 };
76953 extern struct ima_h_table ima_htable;
76954 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
76955 index 88a2788..581ab92 100644
76956 --- a/security/integrity/ima/ima_api.c
76957 +++ b/security/integrity/ima/ima_api.c
76958 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
76959 int result;
76960
76961 /* can overflow, only indicator */
76962 - atomic_long_inc(&ima_htable.violations);
76963 + atomic_long_inc_unchecked(&ima_htable.violations);
76964
76965 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
76966 if (!entry) {
76967 diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/ima/ima_audit.c
76968 index c5c5a72..2ad942f 100644
76969 --- a/security/integrity/ima/ima_audit.c
76970 +++ b/security/integrity/ima/ima_audit.c
76971 @@ -56,9 +56,11 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
76972 audit_log_format(ab, " name=");
76973 audit_log_untrustedstring(ab, fname);
76974 }
76975 - if (inode)
76976 - audit_log_format(ab, " dev=%s ino=%lu",
76977 - inode->i_sb->s_id, inode->i_ino);
76978 + if (inode) {
76979 + audit_log_format(ab, " dev=");
76980 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
76981 + audit_log_format(ab, " ino=%lu", inode->i_ino);
76982 + }
76983 audit_log_format(ab, " res=%d", !result ? 0 : 1);
76984 audit_log_end(ab);
76985 }
76986 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
76987 index e1aa2b4..52027bf 100644
76988 --- a/security/integrity/ima/ima_fs.c
76989 +++ b/security/integrity/ima/ima_fs.c
76990 @@ -28,12 +28,12 @@
76991 static int valid_policy = 1;
76992 #define TMPBUFLEN 12
76993 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
76994 - loff_t *ppos, atomic_long_t *val)
76995 + loff_t *ppos, atomic_long_unchecked_t *val)
76996 {
76997 char tmpbuf[TMPBUFLEN];
76998 ssize_t len;
76999
77000 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
77001 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
77002 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
77003 }
77004
77005 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
77006 index 55a6271..ad829c3 100644
77007 --- a/security/integrity/ima/ima_queue.c
77008 +++ b/security/integrity/ima/ima_queue.c
77009 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
77010 INIT_LIST_HEAD(&qe->later);
77011 list_add_tail_rcu(&qe->later, &ima_measurements);
77012
77013 - atomic_long_inc(&ima_htable.len);
77014 + atomic_long_inc_unchecked(&ima_htable.len);
77015 key = ima_hash_key(entry->digest);
77016 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
77017 return 0;
77018 diff --git a/security/keys/compat.c b/security/keys/compat.c
77019 index 4c48e13..7abdac9 100644
77020 --- a/security/keys/compat.c
77021 +++ b/security/keys/compat.c
77022 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
77023 if (ret == 0)
77024 goto no_payload_free;
77025
77026 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
77027 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
77028
77029 if (iov != iovstack)
77030 kfree(iov);
77031 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
77032 index 0b3f5d7..892c8a6 100644
77033 --- a/security/keys/keyctl.c
77034 +++ b/security/keys/keyctl.c
77035 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
77036 /*
77037 * Copy the iovec data from userspace
77038 */
77039 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
77040 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
77041 unsigned ioc)
77042 {
77043 for (; ioc > 0; ioc--) {
77044 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
77045 * If successful, 0 will be returned.
77046 */
77047 long keyctl_instantiate_key_common(key_serial_t id,
77048 - const struct iovec *payload_iov,
77049 + const struct iovec __user *payload_iov,
77050 unsigned ioc,
77051 size_t plen,
77052 key_serial_t ringid)
77053 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
77054 [0].iov_len = plen
77055 };
77056
77057 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
77058 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
77059 }
77060
77061 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
77062 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
77063 if (ret == 0)
77064 goto no_payload_free;
77065
77066 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
77067 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
77068
77069 if (iov != iovstack)
77070 kfree(iov);
77071 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
77072 index 37a7f3b..86dc19f 100644
77073 --- a/security/keys/keyring.c
77074 +++ b/security/keys/keyring.c
77075 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
77076 ret = -EFAULT;
77077
77078 for (loop = 0; loop < klist->nkeys; loop++) {
77079 + key_serial_t serial;
77080 key = klist->keys[loop];
77081 + serial = key->serial;
77082
77083 tmp = sizeof(key_serial_t);
77084 if (tmp > buflen)
77085 tmp = buflen;
77086
77087 - if (copy_to_user(buffer,
77088 - &key->serial,
77089 - tmp) != 0)
77090 + if (copy_to_user(buffer, &serial, tmp))
77091 goto error;
77092
77093 buflen -= tmp;
77094 diff --git a/security/lsm_audit.c b/security/lsm_audit.c
77095 index 893af8a..ba9237c 100644
77096 --- a/security/lsm_audit.c
77097 +++ b/security/lsm_audit.c
77098 @@ -234,10 +234,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
77099 audit_log_d_path(ab, "path=", &a->u.path);
77100
77101 inode = a->u.path.dentry->d_inode;
77102 - if (inode)
77103 - audit_log_format(ab, " dev=%s ino=%lu",
77104 - inode->i_sb->s_id,
77105 - inode->i_ino);
77106 + if (inode) {
77107 + audit_log_format(ab, " dev=");
77108 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
77109 + audit_log_format(ab, " ino=%lu", inode->i_ino);
77110 + }
77111 break;
77112 }
77113 case LSM_AUDIT_DATA_DENTRY: {
77114 @@ -247,10 +248,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
77115 audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
77116
77117 inode = a->u.dentry->d_inode;
77118 - if (inode)
77119 - audit_log_format(ab, " dev=%s ino=%lu",
77120 - inode->i_sb->s_id,
77121 - inode->i_ino);
77122 + if (inode) {
77123 + audit_log_format(ab, " dev=");
77124 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
77125 + audit_log_format(ab, " ino=%lu", inode->i_ino);
77126 + }
77127 break;
77128 }
77129 case LSM_AUDIT_DATA_INODE: {
77130 @@ -265,8 +267,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
77131 dentry->d_name.name);
77132 dput(dentry);
77133 }
77134 - audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id,
77135 - inode->i_ino);
77136 + audit_log_format(ab, " dev=");
77137 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
77138 + audit_log_format(ab, " ino=%lu", inode->i_ino);
77139 break;
77140 }
77141 case LSM_AUDIT_DATA_TASK:
77142 diff --git a/security/min_addr.c b/security/min_addr.c
77143 index f728728..6457a0c 100644
77144 --- a/security/min_addr.c
77145 +++ b/security/min_addr.c
77146 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
77147 */
77148 static void update_mmap_min_addr(void)
77149 {
77150 +#ifndef SPARC
77151 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
77152 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
77153 mmap_min_addr = dac_mmap_min_addr;
77154 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
77155 #else
77156 mmap_min_addr = dac_mmap_min_addr;
77157 #endif
77158 +#endif
77159 }
77160
77161 /*
77162 diff --git a/security/security.c b/security/security.c
77163 index e2f684a..8d62ef5 100644
77164 --- a/security/security.c
77165 +++ b/security/security.c
77166 @@ -26,8 +26,8 @@
77167 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
77168 CONFIG_DEFAULT_SECURITY;
77169
77170 -static struct security_operations *security_ops;
77171 -static struct security_operations default_security_ops = {
77172 +static struct security_operations *security_ops __read_only;
77173 +static struct security_operations default_security_ops __read_only = {
77174 .name = "default",
77175 };
77176
77177 @@ -68,7 +68,9 @@ int __init security_init(void)
77178
77179 void reset_security_ops(void)
77180 {
77181 + pax_open_kernel();
77182 security_ops = &default_security_ops;
77183 + pax_close_kernel();
77184 }
77185
77186 /* Save user chosen LSM */
77187 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
77188 index 1126c10..effb32b 100644
77189 --- a/security/selinux/hooks.c
77190 +++ b/security/selinux/hooks.c
77191 @@ -94,8 +94,6 @@
77192
77193 #define NUM_SEL_MNT_OPTS 5
77194
77195 -extern struct security_operations *security_ops;
77196 -
77197 /* SECMARK reference count */
77198 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
77199
77200 @@ -5449,7 +5447,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
77201
77202 #endif
77203
77204 -static struct security_operations selinux_ops = {
77205 +static struct security_operations selinux_ops __read_only = {
77206 .name = "selinux",
77207
77208 .ptrace_access_check = selinux_ptrace_access_check,
77209 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
77210 index b43813c..74be837 100644
77211 --- a/security/selinux/include/xfrm.h
77212 +++ b/security/selinux/include/xfrm.h
77213 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
77214
77215 static inline void selinux_xfrm_notify_policyload(void)
77216 {
77217 - atomic_inc(&flow_cache_genid);
77218 + atomic_inc_unchecked(&flow_cache_genid);
77219 }
77220 #else
77221 static inline int selinux_xfrm_enabled(void)
77222 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
77223 index 7db62b4..ee4d949 100644
77224 --- a/security/smack/smack_lsm.c
77225 +++ b/security/smack/smack_lsm.c
77226 @@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
77227 return 0;
77228 }
77229
77230 -struct security_operations smack_ops = {
77231 +struct security_operations smack_ops __read_only = {
77232 .name = "smack",
77233
77234 .ptrace_access_check = smack_ptrace_access_check,
77235 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
77236 index 4b327b6..646c57a 100644
77237 --- a/security/tomoyo/tomoyo.c
77238 +++ b/security/tomoyo/tomoyo.c
77239 @@ -504,7 +504,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
77240 * tomoyo_security_ops is a "struct security_operations" which is used for
77241 * registering TOMOYO.
77242 */
77243 -static struct security_operations tomoyo_security_ops = {
77244 +static struct security_operations tomoyo_security_ops __read_only = {
77245 .name = "tomoyo",
77246 .cred_alloc_blank = tomoyo_cred_alloc_blank,
77247 .cred_prepare = tomoyo_cred_prepare,
77248 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
77249 index 762af68..7103453 100644
77250 --- a/sound/aoa/codecs/onyx.c
77251 +++ b/sound/aoa/codecs/onyx.c
77252 @@ -54,7 +54,7 @@ struct onyx {
77253 spdif_locked:1,
77254 analog_locked:1,
77255 original_mute:2;
77256 - int open_count;
77257 + local_t open_count;
77258 struct codec_info *codec_info;
77259
77260 /* mutex serializes concurrent access to the device
77261 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
77262 struct onyx *onyx = cii->codec_data;
77263
77264 mutex_lock(&onyx->mutex);
77265 - onyx->open_count++;
77266 + local_inc(&onyx->open_count);
77267 mutex_unlock(&onyx->mutex);
77268
77269 return 0;
77270 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
77271 struct onyx *onyx = cii->codec_data;
77272
77273 mutex_lock(&onyx->mutex);
77274 - onyx->open_count--;
77275 - if (!onyx->open_count)
77276 + if (local_dec_and_test(&onyx->open_count))
77277 onyx->spdif_locked = onyx->analog_locked = 0;
77278 mutex_unlock(&onyx->mutex);
77279
77280 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
77281 index ffd2025..df062c9 100644
77282 --- a/sound/aoa/codecs/onyx.h
77283 +++ b/sound/aoa/codecs/onyx.h
77284 @@ -11,6 +11,7 @@
77285 #include <linux/i2c.h>
77286 #include <asm/pmac_low_i2c.h>
77287 #include <asm/prom.h>
77288 +#include <asm/local.h>
77289
77290 /* PCM3052 register definitions */
77291
77292 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
77293 index 3cc4b86..af0a951 100644
77294 --- a/sound/core/oss/pcm_oss.c
77295 +++ b/sound/core/oss/pcm_oss.c
77296 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
77297 if (in_kernel) {
77298 mm_segment_t fs;
77299 fs = snd_enter_user();
77300 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
77301 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
77302 snd_leave_user(fs);
77303 } else {
77304 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
77305 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
77306 }
77307 if (ret != -EPIPE && ret != -ESTRPIPE)
77308 break;
77309 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
77310 if (in_kernel) {
77311 mm_segment_t fs;
77312 fs = snd_enter_user();
77313 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
77314 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
77315 snd_leave_user(fs);
77316 } else {
77317 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
77318 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
77319 }
77320 if (ret == -EPIPE) {
77321 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
77322 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
77323 struct snd_pcm_plugin_channel *channels;
77324 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
77325 if (!in_kernel) {
77326 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
77327 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
77328 return -EFAULT;
77329 buf = runtime->oss.buffer;
77330 }
77331 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
77332 }
77333 } else {
77334 tmp = snd_pcm_oss_write2(substream,
77335 - (const char __force *)buf,
77336 + (const char __force_kernel *)buf,
77337 runtime->oss.period_bytes, 0);
77338 if (tmp <= 0)
77339 goto err;
77340 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
77341 struct snd_pcm_runtime *runtime = substream->runtime;
77342 snd_pcm_sframes_t frames, frames1;
77343 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
77344 - char __user *final_dst = (char __force __user *)buf;
77345 + char __user *final_dst = (char __force_user *)buf;
77346 if (runtime->oss.plugin_first) {
77347 struct snd_pcm_plugin_channel *channels;
77348 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
77349 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
77350 xfer += tmp;
77351 runtime->oss.buffer_used -= tmp;
77352 } else {
77353 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
77354 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
77355 runtime->oss.period_bytes, 0);
77356 if (tmp <= 0)
77357 goto err;
77358 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
77359 size1);
77360 size1 /= runtime->channels; /* frames */
77361 fs = snd_enter_user();
77362 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
77363 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
77364 snd_leave_user(fs);
77365 }
77366 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
77367 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
77368 index 91cdf94..4085161 100644
77369 --- a/sound/core/pcm_compat.c
77370 +++ b/sound/core/pcm_compat.c
77371 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
77372 int err;
77373
77374 fs = snd_enter_user();
77375 - err = snd_pcm_delay(substream, &delay);
77376 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
77377 snd_leave_user(fs);
77378 if (err < 0)
77379 return err;
77380 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
77381 index 25ed9fe..24c46e9 100644
77382 --- a/sound/core/pcm_native.c
77383 +++ b/sound/core/pcm_native.c
77384 @@ -2765,11 +2765,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
77385 switch (substream->stream) {
77386 case SNDRV_PCM_STREAM_PLAYBACK:
77387 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
77388 - (void __user *)arg);
77389 + (void __force_user *)arg);
77390 break;
77391 case SNDRV_PCM_STREAM_CAPTURE:
77392 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
77393 - (void __user *)arg);
77394 + (void __force_user *)arg);
77395 break;
77396 default:
77397 result = -EINVAL;
77398 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
77399 index 5cf8d65..912a79c 100644
77400 --- a/sound/core/seq/seq_device.c
77401 +++ b/sound/core/seq/seq_device.c
77402 @@ -64,7 +64,7 @@ struct ops_list {
77403 int argsize; /* argument size */
77404
77405 /* operators */
77406 - struct snd_seq_dev_ops ops;
77407 + struct snd_seq_dev_ops *ops;
77408
77409 /* registred devices */
77410 struct list_head dev_list; /* list of devices */
77411 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
77412
77413 mutex_lock(&ops->reg_mutex);
77414 /* copy driver operators */
77415 - ops->ops = *entry;
77416 + ops->ops = entry;
77417 ops->driver |= DRIVER_LOADED;
77418 ops->argsize = argsize;
77419
77420 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
77421 dev->name, ops->id, ops->argsize, dev->argsize);
77422 return -EINVAL;
77423 }
77424 - if (ops->ops.init_device(dev) >= 0) {
77425 + if (ops->ops->init_device(dev) >= 0) {
77426 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
77427 ops->num_init_devices++;
77428 } else {
77429 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
77430 dev->name, ops->id, ops->argsize, dev->argsize);
77431 return -EINVAL;
77432 }
77433 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
77434 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
77435 dev->status = SNDRV_SEQ_DEVICE_FREE;
77436 dev->driver_data = NULL;
77437 ops->num_init_devices--;
77438 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
77439 index f24bf9a..1f7b67c 100644
77440 --- a/sound/drivers/mts64.c
77441 +++ b/sound/drivers/mts64.c
77442 @@ -29,6 +29,7 @@
77443 #include <sound/initval.h>
77444 #include <sound/rawmidi.h>
77445 #include <sound/control.h>
77446 +#include <asm/local.h>
77447
77448 #define CARD_NAME "Miditerminal 4140"
77449 #define DRIVER_NAME "MTS64"
77450 @@ -67,7 +68,7 @@ struct mts64 {
77451 struct pardevice *pardev;
77452 int pardev_claimed;
77453
77454 - int open_count;
77455 + local_t open_count;
77456 int current_midi_output_port;
77457 int current_midi_input_port;
77458 u8 mode[MTS64_NUM_INPUT_PORTS];
77459 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
77460 {
77461 struct mts64 *mts = substream->rmidi->private_data;
77462
77463 - if (mts->open_count == 0) {
77464 + if (local_read(&mts->open_count) == 0) {
77465 /* We don't need a spinlock here, because this is just called
77466 if the device has not been opened before.
77467 So there aren't any IRQs from the device */
77468 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
77469
77470 msleep(50);
77471 }
77472 - ++(mts->open_count);
77473 + local_inc(&mts->open_count);
77474
77475 return 0;
77476 }
77477 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
77478 struct mts64 *mts = substream->rmidi->private_data;
77479 unsigned long flags;
77480
77481 - --(mts->open_count);
77482 - if (mts->open_count == 0) {
77483 + if (local_dec_return(&mts->open_count) == 0) {
77484 /* We need the spinlock_irqsave here because we can still
77485 have IRQs at this point */
77486 spin_lock_irqsave(&mts->lock, flags);
77487 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
77488
77489 msleep(500);
77490
77491 - } else if (mts->open_count < 0)
77492 - mts->open_count = 0;
77493 + } else if (local_read(&mts->open_count) < 0)
77494 + local_set(&mts->open_count, 0);
77495
77496 return 0;
77497 }
77498 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
77499 index b953fb4..1999c01 100644
77500 --- a/sound/drivers/opl4/opl4_lib.c
77501 +++ b/sound/drivers/opl4/opl4_lib.c
77502 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
77503 MODULE_DESCRIPTION("OPL4 driver");
77504 MODULE_LICENSE("GPL");
77505
77506 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
77507 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
77508 {
77509 int timeout = 10;
77510 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
77511 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
77512 index f664823..590c745 100644
77513 --- a/sound/drivers/portman2x4.c
77514 +++ b/sound/drivers/portman2x4.c
77515 @@ -48,6 +48,7 @@
77516 #include <sound/initval.h>
77517 #include <sound/rawmidi.h>
77518 #include <sound/control.h>
77519 +#include <asm/local.h>
77520
77521 #define CARD_NAME "Portman 2x4"
77522 #define DRIVER_NAME "portman"
77523 @@ -85,7 +86,7 @@ struct portman {
77524 struct pardevice *pardev;
77525 int pardev_claimed;
77526
77527 - int open_count;
77528 + local_t open_count;
77529 int mode[PORTMAN_NUM_INPUT_PORTS];
77530 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
77531 };
77532 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
77533 index 87657dd..a8268d4 100644
77534 --- a/sound/firewire/amdtp.c
77535 +++ b/sound/firewire/amdtp.c
77536 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
77537 ptr = s->pcm_buffer_pointer + data_blocks;
77538 if (ptr >= pcm->runtime->buffer_size)
77539 ptr -= pcm->runtime->buffer_size;
77540 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
77541 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
77542
77543 s->pcm_period_pointer += data_blocks;
77544 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
77545 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
77546 */
77547 void amdtp_out_stream_update(struct amdtp_out_stream *s)
77548 {
77549 - ACCESS_ONCE(s->source_node_id_field) =
77550 + ACCESS_ONCE_RW(s->source_node_id_field) =
77551 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
77552 }
77553 EXPORT_SYMBOL(amdtp_out_stream_update);
77554 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
77555 index 537a9cb..8e8c8e9 100644
77556 --- a/sound/firewire/amdtp.h
77557 +++ b/sound/firewire/amdtp.h
77558 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
77559 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
77560 struct snd_pcm_substream *pcm)
77561 {
77562 - ACCESS_ONCE(s->pcm) = pcm;
77563 + ACCESS_ONCE_RW(s->pcm) = pcm;
77564 }
77565
77566 /**
77567 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
77568 index cd094ec..eca1277 100644
77569 --- a/sound/firewire/isight.c
77570 +++ b/sound/firewire/isight.c
77571 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
77572 ptr += count;
77573 if (ptr >= runtime->buffer_size)
77574 ptr -= runtime->buffer_size;
77575 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
77576 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
77577
77578 isight->period_counter += count;
77579 if (isight->period_counter >= runtime->period_size) {
77580 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
77581 if (err < 0)
77582 return err;
77583
77584 - ACCESS_ONCE(isight->pcm_active) = true;
77585 + ACCESS_ONCE_RW(isight->pcm_active) = true;
77586
77587 return 0;
77588 }
77589 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
77590 {
77591 struct isight *isight = substream->private_data;
77592
77593 - ACCESS_ONCE(isight->pcm_active) = false;
77594 + ACCESS_ONCE_RW(isight->pcm_active) = false;
77595
77596 mutex_lock(&isight->mutex);
77597 isight_stop_streaming(isight);
77598 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
77599
77600 switch (cmd) {
77601 case SNDRV_PCM_TRIGGER_START:
77602 - ACCESS_ONCE(isight->pcm_running) = true;
77603 + ACCESS_ONCE_RW(isight->pcm_running) = true;
77604 break;
77605 case SNDRV_PCM_TRIGGER_STOP:
77606 - ACCESS_ONCE(isight->pcm_running) = false;
77607 + ACCESS_ONCE_RW(isight->pcm_running) = false;
77608 break;
77609 default:
77610 return -EINVAL;
77611 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
77612 index c94578d..0794ac1 100644
77613 --- a/sound/isa/cmi8330.c
77614 +++ b/sound/isa/cmi8330.c
77615 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
77616
77617 struct snd_pcm *pcm;
77618 struct snd_cmi8330_stream {
77619 - struct snd_pcm_ops ops;
77620 + snd_pcm_ops_no_const ops;
77621 snd_pcm_open_callback_t open;
77622 void *private_data; /* sb or wss */
77623 } streams[2];
77624 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
77625 index 733b014..56ce96f 100644
77626 --- a/sound/oss/sb_audio.c
77627 +++ b/sound/oss/sb_audio.c
77628 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
77629 buf16 = (signed short *)(localbuf + localoffs);
77630 while (c)
77631 {
77632 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77633 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77634 if (copy_from_user(lbuf8,
77635 userbuf+useroffs + p,
77636 locallen))
77637 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
77638 index 09d4648..cf234c7 100644
77639 --- a/sound/oss/swarm_cs4297a.c
77640 +++ b/sound/oss/swarm_cs4297a.c
77641 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
77642 {
77643 struct cs4297a_state *s;
77644 u32 pwr, id;
77645 - mm_segment_t fs;
77646 int rval;
77647 #ifndef CONFIG_BCM_CS4297A_CSWARM
77648 u64 cfg;
77649 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
77650 if (!rval) {
77651 char *sb1250_duart_present;
77652
77653 +#if 0
77654 + mm_segment_t fs;
77655 fs = get_fs();
77656 set_fs(KERNEL_DS);
77657 -#if 0
77658 val = SOUND_MASK_LINE;
77659 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
77660 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
77661 val = initvol[i].vol;
77662 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
77663 }
77664 + set_fs(fs);
77665 // cs4297a_write_ac97(s, 0x18, 0x0808);
77666 #else
77667 // cs4297a_write_ac97(s, 0x5e, 0x180);
77668 cs4297a_write_ac97(s, 0x02, 0x0808);
77669 cs4297a_write_ac97(s, 0x18, 0x0808);
77670 #endif
77671 - set_fs(fs);
77672
77673 list_add(&s->list, &cs4297a_devs);
77674
77675 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
77676 index 5644711..a2aebc1 100644
77677 --- a/sound/pci/hda/hda_codec.h
77678 +++ b/sound/pci/hda/hda_codec.h
77679 @@ -611,7 +611,7 @@ struct hda_bus_ops {
77680 /* notify power-up/down from codec to controller */
77681 void (*pm_notify)(struct hda_bus *bus);
77682 #endif
77683 -};
77684 +} __no_const;
77685
77686 /* template to pass to the bus constructor */
77687 struct hda_bus_template {
77688 @@ -713,6 +713,7 @@ struct hda_codec_ops {
77689 #endif
77690 void (*reboot_notify)(struct hda_codec *codec);
77691 };
77692 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
77693
77694 /* record for amp information cache */
77695 struct hda_cache_head {
77696 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
77697 struct snd_pcm_substream *substream);
77698 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
77699 struct snd_pcm_substream *substream);
77700 -};
77701 +} __no_const;
77702
77703 /* PCM information for each substream */
77704 struct hda_pcm_stream {
77705 @@ -801,7 +802,7 @@ struct hda_codec {
77706 const char *modelname; /* model name for preset */
77707
77708 /* set by patch */
77709 - struct hda_codec_ops patch_ops;
77710 + hda_codec_ops_no_const patch_ops;
77711
77712 /* PCM to create, set by patch_ops.build_pcms callback */
77713 unsigned int num_pcms;
77714 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
77715 index 0da778a..bc38b84 100644
77716 --- a/sound/pci/ice1712/ice1712.h
77717 +++ b/sound/pci/ice1712/ice1712.h
77718 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
77719 unsigned int mask_flags; /* total mask bits */
77720 struct snd_akm4xxx_ops {
77721 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
77722 - } ops;
77723 + } __no_const ops;
77724 };
77725
77726 struct snd_ice1712_spdif {
77727 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
77728 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77729 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77730 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77731 - } ops;
77732 + } __no_const ops;
77733 };
77734
77735
77736 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
77737 index 03ee4e3..be86b46 100644
77738 --- a/sound/pci/ymfpci/ymfpci_main.c
77739 +++ b/sound/pci/ymfpci/ymfpci_main.c
77740 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
77741 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
77742 break;
77743 }
77744 - if (atomic_read(&chip->interrupt_sleep_count)) {
77745 - atomic_set(&chip->interrupt_sleep_count, 0);
77746 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77747 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77748 wake_up(&chip->interrupt_sleep);
77749 }
77750 __end:
77751 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
77752 continue;
77753 init_waitqueue_entry(&wait, current);
77754 add_wait_queue(&chip->interrupt_sleep, &wait);
77755 - atomic_inc(&chip->interrupt_sleep_count);
77756 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
77757 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
77758 remove_wait_queue(&chip->interrupt_sleep, &wait);
77759 }
77760 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
77761 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
77762 spin_unlock(&chip->reg_lock);
77763
77764 - if (atomic_read(&chip->interrupt_sleep_count)) {
77765 - atomic_set(&chip->interrupt_sleep_count, 0);
77766 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77767 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77768 wake_up(&chip->interrupt_sleep);
77769 }
77770 }
77771 @@ -2382,7 +2382,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
77772 spin_lock_init(&chip->reg_lock);
77773 spin_lock_init(&chip->voice_lock);
77774 init_waitqueue_head(&chip->interrupt_sleep);
77775 - atomic_set(&chip->interrupt_sleep_count, 0);
77776 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77777 chip->card = card;
77778 chip->pci = pci;
77779 chip->irq = -1;
77780 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
77781 index ee15337..e2187a6 100644
77782 --- a/sound/soc/soc-pcm.c
77783 +++ b/sound/soc/soc-pcm.c
77784 @@ -583,7 +583,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
77785 }
77786
77787 /* ASoC PCM operations */
77788 -static struct snd_pcm_ops soc_pcm_ops = {
77789 +static snd_pcm_ops_no_const soc_pcm_ops = {
77790 .open = soc_pcm_open,
77791 .close = soc_pcm_close,
77792 .hw_params = soc_pcm_hw_params,
77793 diff --git a/sound/usb/card.h b/sound/usb/card.h
77794 index a39edcc..1014050 100644
77795 --- a/sound/usb/card.h
77796 +++ b/sound/usb/card.h
77797 @@ -44,6 +44,7 @@ struct snd_urb_ops {
77798 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77799 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77800 };
77801 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
77802
77803 struct snd_usb_substream {
77804 struct snd_usb_stream *stream;
77805 @@ -93,7 +94,7 @@ struct snd_usb_substream {
77806 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
77807 spinlock_t lock;
77808
77809 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
77810 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
77811 int last_frame_number; /* stored frame number */
77812 int last_delay; /* stored delay */
77813 };
77814 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
77815 new file mode 100644
77816 index 0000000..29b6b75
77817 --- /dev/null
77818 +++ b/tools/gcc/Makefile
77819 @@ -0,0 +1,21 @@
77820 +#CC := gcc
77821 +#PLUGIN_SOURCE_FILES := pax_plugin.c
77822 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
77823 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
77824 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
77825 +
77826 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99
77827 +
77828 +hostlibs-y := constify_plugin.so
77829 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
77830 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
77831 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
77832 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
77833 +
77834 +always := $(hostlibs-y)
77835 +
77836 +constify_plugin-objs := constify_plugin.o
77837 +stackleak_plugin-objs := stackleak_plugin.o
77838 +kallocstat_plugin-objs := kallocstat_plugin.o
77839 +kernexec_plugin-objs := kernexec_plugin.o
77840 +checker_plugin-objs := checker_plugin.o
77841 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
77842 new file mode 100644
77843 index 0000000..d41b5af
77844 --- /dev/null
77845 +++ b/tools/gcc/checker_plugin.c
77846 @@ -0,0 +1,171 @@
77847 +/*
77848 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77849 + * Licensed under the GPL v2
77850 + *
77851 + * Note: the choice of the license means that the compilation process is
77852 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77853 + * but for the kernel it doesn't matter since it doesn't link against
77854 + * any of the gcc libraries
77855 + *
77856 + * gcc plugin to implement various sparse (source code checker) features
77857 + *
77858 + * TODO:
77859 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
77860 + *
77861 + * BUGS:
77862 + * - none known
77863 + */
77864 +#include "gcc-plugin.h"
77865 +#include "config.h"
77866 +#include "system.h"
77867 +#include "coretypes.h"
77868 +#include "tree.h"
77869 +#include "tree-pass.h"
77870 +#include "flags.h"
77871 +#include "intl.h"
77872 +#include "toplev.h"
77873 +#include "plugin.h"
77874 +//#include "expr.h" where are you...
77875 +#include "diagnostic.h"
77876 +#include "plugin-version.h"
77877 +#include "tm.h"
77878 +#include "function.h"
77879 +#include "basic-block.h"
77880 +#include "gimple.h"
77881 +#include "rtl.h"
77882 +#include "emit-rtl.h"
77883 +#include "tree-flow.h"
77884 +#include "target.h"
77885 +
77886 +extern void c_register_addr_space (const char *str, addr_space_t as);
77887 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
77888 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
77889 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
77890 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
77891 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
77892 +
77893 +extern void print_gimple_stmt(FILE *, gimple, int, int);
77894 +extern rtx emit_move_insn(rtx x, rtx y);
77895 +
77896 +int plugin_is_GPL_compatible;
77897 +
77898 +static struct plugin_info checker_plugin_info = {
77899 + .version = "201111150100",
77900 +};
77901 +
77902 +#define ADDR_SPACE_KERNEL 0
77903 +#define ADDR_SPACE_FORCE_KERNEL 1
77904 +#define ADDR_SPACE_USER 2
77905 +#define ADDR_SPACE_FORCE_USER 3
77906 +#define ADDR_SPACE_IOMEM 0
77907 +#define ADDR_SPACE_FORCE_IOMEM 0
77908 +#define ADDR_SPACE_PERCPU 0
77909 +#define ADDR_SPACE_FORCE_PERCPU 0
77910 +#define ADDR_SPACE_RCU 0
77911 +#define ADDR_SPACE_FORCE_RCU 0
77912 +
77913 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
77914 +{
77915 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
77916 +}
77917 +
77918 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
77919 +{
77920 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
77921 +}
77922 +
77923 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
77924 +{
77925 + return default_addr_space_valid_pointer_mode(mode, as);
77926 +}
77927 +
77928 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
77929 +{
77930 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
77931 +}
77932 +
77933 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
77934 +{
77935 + return default_addr_space_legitimize_address(x, oldx, mode, as);
77936 +}
77937 +
77938 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
77939 +{
77940 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
77941 + return true;
77942 +
77943 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
77944 + return true;
77945 +
77946 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
77947 + return true;
77948 +
77949 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
77950 + return true;
77951 +
77952 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
77953 + return true;
77954 +
77955 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
77956 + return true;
77957 +
77958 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
77959 + return true;
77960 +
77961 + return subset == superset;
77962 +}
77963 +
77964 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
77965 +{
77966 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
77967 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
77968 +
77969 + return op;
77970 +}
77971 +
77972 +static void register_checker_address_spaces(void *event_data, void *data)
77973 +{
77974 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
77975 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
77976 + c_register_addr_space("__user", ADDR_SPACE_USER);
77977 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
77978 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
77979 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
77980 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
77981 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
77982 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
77983 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
77984 +
77985 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
77986 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
77987 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
77988 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
77989 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
77990 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
77991 + targetm.addr_space.convert = checker_addr_space_convert;
77992 +}
77993 +
77994 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77995 +{
77996 + const char * const plugin_name = plugin_info->base_name;
77997 + const int argc = plugin_info->argc;
77998 + const struct plugin_argument * const argv = plugin_info->argv;
77999 + int i;
78000 +
78001 + if (!plugin_default_version_check(version, &gcc_version)) {
78002 + error(G_("incompatible gcc/plugin versions"));
78003 + return 1;
78004 + }
78005 +
78006 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
78007 +
78008 + for (i = 0; i < argc; ++i)
78009 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78010 +
78011 + if (TARGET_64BIT == 0)
78012 + return 0;
78013 +
78014 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
78015 +
78016 + return 0;
78017 +}
78018 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
78019 new file mode 100644
78020 index 0000000..704a564
78021 --- /dev/null
78022 +++ b/tools/gcc/constify_plugin.c
78023 @@ -0,0 +1,303 @@
78024 +/*
78025 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
78026 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
78027 + * Licensed under the GPL v2, or (at your option) v3
78028 + *
78029 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
78030 + *
78031 + * Homepage:
78032 + * http://www.grsecurity.net/~ephox/const_plugin/
78033 + *
78034 + * Usage:
78035 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
78036 + * $ gcc -fplugin=constify_plugin.so test.c -O2
78037 + */
78038 +
78039 +#include "gcc-plugin.h"
78040 +#include "config.h"
78041 +#include "system.h"
78042 +#include "coretypes.h"
78043 +#include "tree.h"
78044 +#include "tree-pass.h"
78045 +#include "flags.h"
78046 +#include "intl.h"
78047 +#include "toplev.h"
78048 +#include "plugin.h"
78049 +#include "diagnostic.h"
78050 +#include "plugin-version.h"
78051 +#include "tm.h"
78052 +#include "function.h"
78053 +#include "basic-block.h"
78054 +#include "gimple.h"
78055 +#include "rtl.h"
78056 +#include "emit-rtl.h"
78057 +#include "tree-flow.h"
78058 +
78059 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
78060 +
78061 +int plugin_is_GPL_compatible;
78062 +
78063 +static struct plugin_info const_plugin_info = {
78064 + .version = "201111150100",
78065 + .help = "no-constify\tturn off constification\n",
78066 +};
78067 +
78068 +static void constify_type(tree type);
78069 +static bool walk_struct(tree node);
78070 +
78071 +static tree deconstify_type(tree old_type)
78072 +{
78073 + tree new_type, field;
78074 +
78075 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
78076 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
78077 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
78078 + DECL_FIELD_CONTEXT(field) = new_type;
78079 + TYPE_READONLY(new_type) = 0;
78080 + C_TYPE_FIELDS_READONLY(new_type) = 0;
78081 + return new_type;
78082 +}
78083 +
78084 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
78085 +{
78086 + tree type;
78087 +
78088 + *no_add_attrs = true;
78089 + if (TREE_CODE(*node) == FUNCTION_DECL) {
78090 + error("%qE attribute does not apply to functions", name);
78091 + return NULL_TREE;
78092 + }
78093 +
78094 + if (TREE_CODE(*node) == VAR_DECL) {
78095 + error("%qE attribute does not apply to variables", name);
78096 + return NULL_TREE;
78097 + }
78098 +
78099 + if (TYPE_P(*node)) {
78100 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
78101 + *no_add_attrs = false;
78102 + else
78103 + error("%qE attribute applies to struct and union types only", name);
78104 + return NULL_TREE;
78105 + }
78106 +
78107 + type = TREE_TYPE(*node);
78108 +
78109 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
78110 + error("%qE attribute applies to struct and union types only", name);
78111 + return NULL_TREE;
78112 + }
78113 +
78114 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
78115 + error("%qE attribute is already applied to the type", name);
78116 + return NULL_TREE;
78117 + }
78118 +
78119 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
78120 + error("%qE attribute used on type that is not constified", name);
78121 + return NULL_TREE;
78122 + }
78123 +
78124 + if (TREE_CODE(*node) == TYPE_DECL) {
78125 + TREE_TYPE(*node) = deconstify_type(type);
78126 + TREE_READONLY(*node) = 0;
78127 + return NULL_TREE;
78128 + }
78129 +
78130 + return NULL_TREE;
78131 +}
78132 +
78133 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
78134 +{
78135 + *no_add_attrs = true;
78136 + if (!TYPE_P(*node)) {
78137 + error("%qE attribute applies to types only", name);
78138 + return NULL_TREE;
78139 + }
78140 +
78141 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
78142 + error("%qE attribute applies to struct and union types only", name);
78143 + return NULL_TREE;
78144 + }
78145 +
78146 + *no_add_attrs = false;
78147 + constify_type(*node);
78148 + return NULL_TREE;
78149 +}
78150 +
78151 +static struct attribute_spec no_const_attr = {
78152 + .name = "no_const",
78153 + .min_length = 0,
78154 + .max_length = 0,
78155 + .decl_required = false,
78156 + .type_required = false,
78157 + .function_type_required = false,
78158 + .handler = handle_no_const_attribute,
78159 +#if BUILDING_GCC_VERSION >= 4007
78160 + .affects_type_identity = true
78161 +#endif
78162 +};
78163 +
78164 +static struct attribute_spec do_const_attr = {
78165 + .name = "do_const",
78166 + .min_length = 0,
78167 + .max_length = 0,
78168 + .decl_required = false,
78169 + .type_required = false,
78170 + .function_type_required = false,
78171 + .handler = handle_do_const_attribute,
78172 +#if BUILDING_GCC_VERSION >= 4007
78173 + .affects_type_identity = true
78174 +#endif
78175 +};
78176 +
78177 +static void register_attributes(void *event_data, void *data)
78178 +{
78179 + register_attribute(&no_const_attr);
78180 + register_attribute(&do_const_attr);
78181 +}
78182 +
78183 +static void constify_type(tree type)
78184 +{
78185 + TYPE_READONLY(type) = 1;
78186 + C_TYPE_FIELDS_READONLY(type) = 1;
78187 +}
78188 +
78189 +static bool is_fptr(tree field)
78190 +{
78191 + tree ptr = TREE_TYPE(field);
78192 +
78193 + if (TREE_CODE(ptr) != POINTER_TYPE)
78194 + return false;
78195 +
78196 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
78197 +}
78198 +
78199 +static bool walk_struct(tree node)
78200 +{
78201 + tree field;
78202 +
78203 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
78204 + return false;
78205 +
78206 + if (TYPE_FIELDS(node) == NULL_TREE)
78207 + return false;
78208 +
78209 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
78210 + tree type = TREE_TYPE(field);
78211 + enum tree_code code = TREE_CODE(type);
78212 + if (code == RECORD_TYPE || code == UNION_TYPE) {
78213 + if (!(walk_struct(type)))
78214 + return false;
78215 + } else if (!is_fptr(field) && !TREE_READONLY(field))
78216 + return false;
78217 + }
78218 + return true;
78219 +}
78220 +
78221 +static void finish_type(void *event_data, void *data)
78222 +{
78223 + tree type = (tree)event_data;
78224 +
78225 + if (type == NULL_TREE)
78226 + return;
78227 +
78228 + if (TYPE_READONLY(type))
78229 + return;
78230 +
78231 + if (walk_struct(type))
78232 + constify_type(type);
78233 +}
78234 +
78235 +static unsigned int check_local_variables(void);
78236 +
78237 +struct gimple_opt_pass pass_local_variable = {
78238 + {
78239 + .type = GIMPLE_PASS,
78240 + .name = "check_local_variables",
78241 + .gate = NULL,
78242 + .execute = check_local_variables,
78243 + .sub = NULL,
78244 + .next = NULL,
78245 + .static_pass_number = 0,
78246 + .tv_id = TV_NONE,
78247 + .properties_required = 0,
78248 + .properties_provided = 0,
78249 + .properties_destroyed = 0,
78250 + .todo_flags_start = 0,
78251 + .todo_flags_finish = 0
78252 + }
78253 +};
78254 +
78255 +static unsigned int check_local_variables(void)
78256 +{
78257 + tree var;
78258 + referenced_var_iterator rvi;
78259 +
78260 +#if BUILDING_GCC_VERSION == 4005
78261 + FOR_EACH_REFERENCED_VAR(var, rvi) {
78262 +#else
78263 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
78264 +#endif
78265 + tree type = TREE_TYPE(var);
78266 +
78267 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
78268 + continue;
78269 +
78270 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
78271 + continue;
78272 +
78273 + if (!TYPE_READONLY(type))
78274 + continue;
78275 +
78276 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
78277 +// continue;
78278 +
78279 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
78280 +// continue;
78281 +
78282 + if (walk_struct(type)) {
78283 + error("constified variable %qE cannot be local", var);
78284 + return 1;
78285 + }
78286 + }
78287 + return 0;
78288 +}
78289 +
78290 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78291 +{
78292 + const char * const plugin_name = plugin_info->base_name;
78293 + const int argc = plugin_info->argc;
78294 + const struct plugin_argument * const argv = plugin_info->argv;
78295 + int i;
78296 + bool constify = true;
78297 +
78298 + struct register_pass_info local_variable_pass_info = {
78299 + .pass = &pass_local_variable.pass,
78300 + .reference_pass_name = "*referenced_vars",
78301 + .ref_pass_instance_number = 0,
78302 + .pos_op = PASS_POS_INSERT_AFTER
78303 + };
78304 +
78305 + if (!plugin_default_version_check(version, &gcc_version)) {
78306 + error(G_("incompatible gcc/plugin versions"));
78307 + return 1;
78308 + }
78309 +
78310 + for (i = 0; i < argc; ++i) {
78311 + if (!(strcmp(argv[i].key, "no-constify"))) {
78312 + constify = false;
78313 + continue;
78314 + }
78315 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78316 + }
78317 +
78318 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
78319 + if (constify) {
78320 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
78321 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
78322 + }
78323 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
78324 +
78325 + return 0;
78326 +}
78327 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
78328 new file mode 100644
78329 index 0000000..a5eabce
78330 --- /dev/null
78331 +++ b/tools/gcc/kallocstat_plugin.c
78332 @@ -0,0 +1,167 @@
78333 +/*
78334 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78335 + * Licensed under the GPL v2
78336 + *
78337 + * Note: the choice of the license means that the compilation process is
78338 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78339 + * but for the kernel it doesn't matter since it doesn't link against
78340 + * any of the gcc libraries
78341 + *
78342 + * gcc plugin to find the distribution of k*alloc sizes
78343 + *
78344 + * TODO:
78345 + *
78346 + * BUGS:
78347 + * - none known
78348 + */
78349 +#include "gcc-plugin.h"
78350 +#include "config.h"
78351 +#include "system.h"
78352 +#include "coretypes.h"
78353 +#include "tree.h"
78354 +#include "tree-pass.h"
78355 +#include "flags.h"
78356 +#include "intl.h"
78357 +#include "toplev.h"
78358 +#include "plugin.h"
78359 +//#include "expr.h" where are you...
78360 +#include "diagnostic.h"
78361 +#include "plugin-version.h"
78362 +#include "tm.h"
78363 +#include "function.h"
78364 +#include "basic-block.h"
78365 +#include "gimple.h"
78366 +#include "rtl.h"
78367 +#include "emit-rtl.h"
78368 +
78369 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78370 +
78371 +int plugin_is_GPL_compatible;
78372 +
78373 +static const char * const kalloc_functions[] = {
78374 + "__kmalloc",
78375 + "kmalloc",
78376 + "kmalloc_large",
78377 + "kmalloc_node",
78378 + "kmalloc_order",
78379 + "kmalloc_order_trace",
78380 + "kmalloc_slab",
78381 + "kzalloc",
78382 + "kzalloc_node",
78383 +};
78384 +
78385 +static struct plugin_info kallocstat_plugin_info = {
78386 + .version = "201111150100",
78387 +};
78388 +
78389 +static unsigned int execute_kallocstat(void);
78390 +
78391 +static struct gimple_opt_pass kallocstat_pass = {
78392 + .pass = {
78393 + .type = GIMPLE_PASS,
78394 + .name = "kallocstat",
78395 + .gate = NULL,
78396 + .execute = execute_kallocstat,
78397 + .sub = NULL,
78398 + .next = NULL,
78399 + .static_pass_number = 0,
78400 + .tv_id = TV_NONE,
78401 + .properties_required = 0,
78402 + .properties_provided = 0,
78403 + .properties_destroyed = 0,
78404 + .todo_flags_start = 0,
78405 + .todo_flags_finish = 0
78406 + }
78407 +};
78408 +
78409 +static bool is_kalloc(const char *fnname)
78410 +{
78411 + size_t i;
78412 +
78413 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
78414 + if (!strcmp(fnname, kalloc_functions[i]))
78415 + return true;
78416 + return false;
78417 +}
78418 +
78419 +static unsigned int execute_kallocstat(void)
78420 +{
78421 + basic_block bb;
78422 +
78423 + // 1. loop through BBs and GIMPLE statements
78424 + FOR_EACH_BB(bb) {
78425 + gimple_stmt_iterator gsi;
78426 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78427 + // gimple match:
78428 + tree fndecl, size;
78429 + gimple call_stmt;
78430 + const char *fnname;
78431 +
78432 + // is it a call
78433 + call_stmt = gsi_stmt(gsi);
78434 + if (!is_gimple_call(call_stmt))
78435 + continue;
78436 + fndecl = gimple_call_fndecl(call_stmt);
78437 + if (fndecl == NULL_TREE)
78438 + continue;
78439 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
78440 + continue;
78441 +
78442 + // is it a call to k*alloc
78443 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
78444 + if (!is_kalloc(fnname))
78445 + continue;
78446 +
78447 + // is the size arg the result of a simple const assignment
78448 + size = gimple_call_arg(call_stmt, 0);
78449 + while (true) {
78450 + gimple def_stmt;
78451 + expanded_location xloc;
78452 + size_t size_val;
78453 +
78454 + if (TREE_CODE(size) != SSA_NAME)
78455 + break;
78456 + def_stmt = SSA_NAME_DEF_STMT(size);
78457 + if (!def_stmt || !is_gimple_assign(def_stmt))
78458 + break;
78459 + if (gimple_num_ops(def_stmt) != 2)
78460 + break;
78461 + size = gimple_assign_rhs1(def_stmt);
78462 + if (!TREE_CONSTANT(size))
78463 + continue;
78464 + xloc = expand_location(gimple_location(def_stmt));
78465 + if (!xloc.file)
78466 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
78467 + size_val = TREE_INT_CST_LOW(size);
78468 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
78469 + break;
78470 + }
78471 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
78472 +//debug_tree(gimple_call_fn(call_stmt));
78473 +//print_node(stderr, "pax", fndecl, 4);
78474 + }
78475 + }
78476 +
78477 + return 0;
78478 +}
78479 +
78480 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78481 +{
78482 + const char * const plugin_name = plugin_info->base_name;
78483 + struct register_pass_info kallocstat_pass_info = {
78484 + .pass = &kallocstat_pass.pass,
78485 + .reference_pass_name = "ssa",
78486 + .ref_pass_instance_number = 0,
78487 + .pos_op = PASS_POS_INSERT_AFTER
78488 + };
78489 +
78490 + if (!plugin_default_version_check(version, &gcc_version)) {
78491 + error(G_("incompatible gcc/plugin versions"));
78492 + return 1;
78493 + }
78494 +
78495 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
78496 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
78497 +
78498 + return 0;
78499 +}
78500 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
78501 new file mode 100644
78502 index 0000000..008f159
78503 --- /dev/null
78504 +++ b/tools/gcc/kernexec_plugin.c
78505 @@ -0,0 +1,427 @@
78506 +/*
78507 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78508 + * Licensed under the GPL v2
78509 + *
78510 + * Note: the choice of the license means that the compilation process is
78511 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78512 + * but for the kernel it doesn't matter since it doesn't link against
78513 + * any of the gcc libraries
78514 + *
78515 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
78516 + *
78517 + * TODO:
78518 + *
78519 + * BUGS:
78520 + * - none known
78521 + */
78522 +#include "gcc-plugin.h"
78523 +#include "config.h"
78524 +#include "system.h"
78525 +#include "coretypes.h"
78526 +#include "tree.h"
78527 +#include "tree-pass.h"
78528 +#include "flags.h"
78529 +#include "intl.h"
78530 +#include "toplev.h"
78531 +#include "plugin.h"
78532 +//#include "expr.h" where are you...
78533 +#include "diagnostic.h"
78534 +#include "plugin-version.h"
78535 +#include "tm.h"
78536 +#include "function.h"
78537 +#include "basic-block.h"
78538 +#include "gimple.h"
78539 +#include "rtl.h"
78540 +#include "emit-rtl.h"
78541 +#include "tree-flow.h"
78542 +
78543 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78544 +extern rtx emit_move_insn(rtx x, rtx y);
78545 +
78546 +int plugin_is_GPL_compatible;
78547 +
78548 +static struct plugin_info kernexec_plugin_info = {
78549 + .version = "201111291120",
78550 + .help = "method=[bts|or]\tinstrumentation method\n"
78551 +};
78552 +
78553 +static unsigned int execute_kernexec_reload(void);
78554 +static unsigned int execute_kernexec_fptr(void);
78555 +static unsigned int execute_kernexec_retaddr(void);
78556 +static bool kernexec_cmodel_check(void);
78557 +
78558 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
78559 +static void (*kernexec_instrument_retaddr)(rtx);
78560 +
78561 +static struct gimple_opt_pass kernexec_reload_pass = {
78562 + .pass = {
78563 + .type = GIMPLE_PASS,
78564 + .name = "kernexec_reload",
78565 + .gate = kernexec_cmodel_check,
78566 + .execute = execute_kernexec_reload,
78567 + .sub = NULL,
78568 + .next = NULL,
78569 + .static_pass_number = 0,
78570 + .tv_id = TV_NONE,
78571 + .properties_required = 0,
78572 + .properties_provided = 0,
78573 + .properties_destroyed = 0,
78574 + .todo_flags_start = 0,
78575 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
78576 + }
78577 +};
78578 +
78579 +static struct gimple_opt_pass kernexec_fptr_pass = {
78580 + .pass = {
78581 + .type = GIMPLE_PASS,
78582 + .name = "kernexec_fptr",
78583 + .gate = kernexec_cmodel_check,
78584 + .execute = execute_kernexec_fptr,
78585 + .sub = NULL,
78586 + .next = NULL,
78587 + .static_pass_number = 0,
78588 + .tv_id = TV_NONE,
78589 + .properties_required = 0,
78590 + .properties_provided = 0,
78591 + .properties_destroyed = 0,
78592 + .todo_flags_start = 0,
78593 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
78594 + }
78595 +};
78596 +
78597 +static struct rtl_opt_pass kernexec_retaddr_pass = {
78598 + .pass = {
78599 + .type = RTL_PASS,
78600 + .name = "kernexec_retaddr",
78601 + .gate = kernexec_cmodel_check,
78602 + .execute = execute_kernexec_retaddr,
78603 + .sub = NULL,
78604 + .next = NULL,
78605 + .static_pass_number = 0,
78606 + .tv_id = TV_NONE,
78607 + .properties_required = 0,
78608 + .properties_provided = 0,
78609 + .properties_destroyed = 0,
78610 + .todo_flags_start = 0,
78611 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
78612 + }
78613 +};
78614 +
78615 +static bool kernexec_cmodel_check(void)
78616 +{
78617 + tree section;
78618 +
78619 + if (ix86_cmodel != CM_KERNEL)
78620 + return false;
78621 +
78622 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
78623 + if (!section || !TREE_VALUE(section))
78624 + return true;
78625 +
78626 + section = TREE_VALUE(TREE_VALUE(section));
78627 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
78628 + return true;
78629 +
78630 + return false;
78631 +}
78632 +
78633 +/*
78634 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
78635 + */
78636 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
78637 +{
78638 + gimple asm_movabs_stmt;
78639 +
78640 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
78641 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
78642 + gimple_asm_set_volatile(asm_movabs_stmt, true);
78643 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
78644 + update_stmt(asm_movabs_stmt);
78645 +}
78646 +
78647 +/*
78648 + * find all asm() stmts that clobber r10 and add a reload of r10
78649 + */
78650 +static unsigned int execute_kernexec_reload(void)
78651 +{
78652 + basic_block bb;
78653 +
78654 + // 1. loop through BBs and GIMPLE statements
78655 + FOR_EACH_BB(bb) {
78656 + gimple_stmt_iterator gsi;
78657 +
78658 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78659 + // gimple match: __asm__ ("" : : : "r10");
78660 + gimple asm_stmt;
78661 + size_t nclobbers;
78662 +
78663 + // is it an asm ...
78664 + asm_stmt = gsi_stmt(gsi);
78665 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
78666 + continue;
78667 +
78668 + // ... clobbering r10
78669 + nclobbers = gimple_asm_nclobbers(asm_stmt);
78670 + while (nclobbers--) {
78671 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
78672 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
78673 + continue;
78674 + kernexec_reload_fptr_mask(&gsi);
78675 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
78676 + break;
78677 + }
78678 + }
78679 + }
78680 +
78681 + return 0;
78682 +}
78683 +
78684 +/*
78685 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
78686 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
78687 + */
78688 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
78689 +{
78690 + gimple assign_intptr, assign_new_fptr, call_stmt;
78691 + tree intptr, old_fptr, new_fptr, kernexec_mask;
78692 +
78693 + call_stmt = gsi_stmt(*gsi);
78694 + old_fptr = gimple_call_fn(call_stmt);
78695 +
78696 + // create temporary unsigned long variable used for bitops and cast fptr to it
78697 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
78698 + add_referenced_var(intptr);
78699 + mark_sym_for_renaming(intptr);
78700 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
78701 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
78702 + update_stmt(assign_intptr);
78703 +
78704 + // apply logical or to temporary unsigned long and bitmask
78705 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
78706 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
78707 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
78708 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
78709 + update_stmt(assign_intptr);
78710 +
78711 + // cast temporary unsigned long back to a temporary fptr variable
78712 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
78713 + add_referenced_var(new_fptr);
78714 + mark_sym_for_renaming(new_fptr);
78715 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
78716 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
78717 + update_stmt(assign_new_fptr);
78718 +
78719 + // replace call stmt fn with the new fptr
78720 + gimple_call_set_fn(call_stmt, new_fptr);
78721 + update_stmt(call_stmt);
78722 +}
78723 +
78724 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
78725 +{
78726 + gimple asm_or_stmt, call_stmt;
78727 + tree old_fptr, new_fptr, input, output;
78728 + VEC(tree, gc) *inputs = NULL;
78729 + VEC(tree, gc) *outputs = NULL;
78730 +
78731 + call_stmt = gsi_stmt(*gsi);
78732 + old_fptr = gimple_call_fn(call_stmt);
78733 +
78734 + // create temporary fptr variable
78735 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
78736 + add_referenced_var(new_fptr);
78737 + mark_sym_for_renaming(new_fptr);
78738 +
78739 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
78740 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
78741 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
78742 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
78743 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
78744 + VEC_safe_push(tree, gc, inputs, input);
78745 + VEC_safe_push(tree, gc, outputs, output);
78746 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
78747 + gimple_asm_set_volatile(asm_or_stmt, true);
78748 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
78749 + update_stmt(asm_or_stmt);
78750 +
78751 + // replace call stmt fn with the new fptr
78752 + gimple_call_set_fn(call_stmt, new_fptr);
78753 + update_stmt(call_stmt);
78754 +}
78755 +
78756 +/*
78757 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
78758 + */
78759 +static unsigned int execute_kernexec_fptr(void)
78760 +{
78761 + basic_block bb;
78762 +
78763 + // 1. loop through BBs and GIMPLE statements
78764 + FOR_EACH_BB(bb) {
78765 + gimple_stmt_iterator gsi;
78766 +
78767 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78768 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
78769 + tree fn;
78770 + gimple call_stmt;
78771 +
78772 + // is it a call ...
78773 + call_stmt = gsi_stmt(gsi);
78774 + if (!is_gimple_call(call_stmt))
78775 + continue;
78776 + fn = gimple_call_fn(call_stmt);
78777 + if (TREE_CODE(fn) == ADDR_EXPR)
78778 + continue;
78779 + if (TREE_CODE(fn) != SSA_NAME)
78780 + gcc_unreachable();
78781 +
78782 + // ... through a function pointer
78783 + fn = SSA_NAME_VAR(fn);
78784 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
78785 + continue;
78786 + fn = TREE_TYPE(fn);
78787 + if (TREE_CODE(fn) != POINTER_TYPE)
78788 + continue;
78789 + fn = TREE_TYPE(fn);
78790 + if (TREE_CODE(fn) != FUNCTION_TYPE)
78791 + continue;
78792 +
78793 + kernexec_instrument_fptr(&gsi);
78794 +
78795 +//debug_tree(gimple_call_fn(call_stmt));
78796 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
78797 + }
78798 + }
78799 +
78800 + return 0;
78801 +}
78802 +
78803 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
78804 +static void kernexec_instrument_retaddr_bts(rtx insn)
78805 +{
78806 + rtx btsq;
78807 + rtvec argvec, constraintvec, labelvec;
78808 + int line;
78809 +
78810 + // create asm volatile("btsq $63,(%%rsp)":::)
78811 + argvec = rtvec_alloc(0);
78812 + constraintvec = rtvec_alloc(0);
78813 + labelvec = rtvec_alloc(0);
78814 + line = expand_location(RTL_LOCATION(insn)).line;
78815 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
78816 + MEM_VOLATILE_P(btsq) = 1;
78817 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
78818 + emit_insn_before(btsq, insn);
78819 +}
78820 +
78821 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
78822 +static void kernexec_instrument_retaddr_or(rtx insn)
78823 +{
78824 + rtx orq;
78825 + rtvec argvec, constraintvec, labelvec;
78826 + int line;
78827 +
78828 + // create asm volatile("orq %%r10,(%%rsp)":::)
78829 + argvec = rtvec_alloc(0);
78830 + constraintvec = rtvec_alloc(0);
78831 + labelvec = rtvec_alloc(0);
78832 + line = expand_location(RTL_LOCATION(insn)).line;
78833 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
78834 + MEM_VOLATILE_P(orq) = 1;
78835 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
78836 + emit_insn_before(orq, insn);
78837 +}
78838 +
78839 +/*
78840 + * find all asm level function returns and forcibly set the highest bit of the return address
78841 + */
78842 +static unsigned int execute_kernexec_retaddr(void)
78843 +{
78844 + rtx insn;
78845 +
78846 + // 1. find function returns
78847 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
78848 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
78849 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
78850 + rtx body;
78851 +
78852 + // is it a retn
78853 + if (!JUMP_P(insn))
78854 + continue;
78855 + body = PATTERN(insn);
78856 + if (GET_CODE(body) == PARALLEL)
78857 + body = XVECEXP(body, 0, 0);
78858 + if (GET_CODE(body) != RETURN)
78859 + continue;
78860 + kernexec_instrument_retaddr(insn);
78861 + }
78862 +
78863 +// print_simple_rtl(stderr, get_insns());
78864 +// print_rtl(stderr, get_insns());
78865 +
78866 + return 0;
78867 +}
78868 +
78869 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78870 +{
78871 + const char * const plugin_name = plugin_info->base_name;
78872 + const int argc = plugin_info->argc;
78873 + const struct plugin_argument * const argv = plugin_info->argv;
78874 + int i;
78875 + struct register_pass_info kernexec_reload_pass_info = {
78876 + .pass = &kernexec_reload_pass.pass,
78877 + .reference_pass_name = "ssa",
78878 + .ref_pass_instance_number = 0,
78879 + .pos_op = PASS_POS_INSERT_AFTER
78880 + };
78881 + struct register_pass_info kernexec_fptr_pass_info = {
78882 + .pass = &kernexec_fptr_pass.pass,
78883 + .reference_pass_name = "ssa",
78884 + .ref_pass_instance_number = 0,
78885 + .pos_op = PASS_POS_INSERT_AFTER
78886 + };
78887 + struct register_pass_info kernexec_retaddr_pass_info = {
78888 + .pass = &kernexec_retaddr_pass.pass,
78889 + .reference_pass_name = "pro_and_epilogue",
78890 + .ref_pass_instance_number = 0,
78891 + .pos_op = PASS_POS_INSERT_AFTER
78892 + };
78893 +
78894 + if (!plugin_default_version_check(version, &gcc_version)) {
78895 + error(G_("incompatible gcc/plugin versions"));
78896 + return 1;
78897 + }
78898 +
78899 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
78900 +
78901 + if (TARGET_64BIT == 0)
78902 + return 0;
78903 +
78904 + for (i = 0; i < argc; ++i) {
78905 + if (!strcmp(argv[i].key, "method")) {
78906 + if (!argv[i].value) {
78907 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78908 + continue;
78909 + }
78910 + if (!strcmp(argv[i].value, "bts")) {
78911 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
78912 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
78913 + } else if (!strcmp(argv[i].value, "or")) {
78914 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
78915 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
78916 + fix_register("r10", 1, 1);
78917 + } else
78918 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78919 + continue;
78920 + }
78921 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78922 + }
78923 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
78924 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
78925 +
78926 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
78927 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
78928 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
78929 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
78930 +
78931 + return 0;
78932 +}
78933 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
78934 new file mode 100644
78935 index 0000000..8b61031
78936 --- /dev/null
78937 +++ b/tools/gcc/stackleak_plugin.c
78938 @@ -0,0 +1,295 @@
78939 +/*
78940 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78941 + * Licensed under the GPL v2
78942 + *
78943 + * Note: the choice of the license means that the compilation process is
78944 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78945 + * but for the kernel it doesn't matter since it doesn't link against
78946 + * any of the gcc libraries
78947 + *
78948 + * gcc plugin to help implement various PaX features
78949 + *
78950 + * - track lowest stack pointer
78951 + *
78952 + * TODO:
78953 + * - initialize all local variables
78954 + *
78955 + * BUGS:
78956 + * - none known
78957 + */
78958 +#include "gcc-plugin.h"
78959 +#include "config.h"
78960 +#include "system.h"
78961 +#include "coretypes.h"
78962 +#include "tree.h"
78963 +#include "tree-pass.h"
78964 +#include "flags.h"
78965 +#include "intl.h"
78966 +#include "toplev.h"
78967 +#include "plugin.h"
78968 +//#include "expr.h" where are you...
78969 +#include "diagnostic.h"
78970 +#include "plugin-version.h"
78971 +#include "tm.h"
78972 +#include "function.h"
78973 +#include "basic-block.h"
78974 +#include "gimple.h"
78975 +#include "rtl.h"
78976 +#include "emit-rtl.h"
78977 +
78978 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78979 +
78980 +int plugin_is_GPL_compatible;
78981 +
78982 +static int track_frame_size = -1;
78983 +static const char track_function[] = "pax_track_stack";
78984 +static const char check_function[] = "pax_check_alloca";
78985 +static bool init_locals;
78986 +
78987 +static struct plugin_info stackleak_plugin_info = {
78988 + .version = "201111150100",
78989 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
78990 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
78991 +};
78992 +
78993 +static bool gate_stackleak_track_stack(void);
78994 +static unsigned int execute_stackleak_tree_instrument(void);
78995 +static unsigned int execute_stackleak_final(void);
78996 +
78997 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
78998 + .pass = {
78999 + .type = GIMPLE_PASS,
79000 + .name = "stackleak_tree_instrument",
79001 + .gate = gate_stackleak_track_stack,
79002 + .execute = execute_stackleak_tree_instrument,
79003 + .sub = NULL,
79004 + .next = NULL,
79005 + .static_pass_number = 0,
79006 + .tv_id = TV_NONE,
79007 + .properties_required = PROP_gimple_leh | PROP_cfg,
79008 + .properties_provided = 0,
79009 + .properties_destroyed = 0,
79010 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
79011 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
79012 + }
79013 +};
79014 +
79015 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
79016 + .pass = {
79017 + .type = RTL_PASS,
79018 + .name = "stackleak_final",
79019 + .gate = gate_stackleak_track_stack,
79020 + .execute = execute_stackleak_final,
79021 + .sub = NULL,
79022 + .next = NULL,
79023 + .static_pass_number = 0,
79024 + .tv_id = TV_NONE,
79025 + .properties_required = 0,
79026 + .properties_provided = 0,
79027 + .properties_destroyed = 0,
79028 + .todo_flags_start = 0,
79029 + .todo_flags_finish = TODO_dump_func
79030 + }
79031 +};
79032 +
79033 +static bool gate_stackleak_track_stack(void)
79034 +{
79035 + return track_frame_size >= 0;
79036 +}
79037 +
79038 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
79039 +{
79040 + gimple check_alloca;
79041 + tree fndecl, fntype, alloca_size;
79042 +
79043 + // insert call to void pax_check_alloca(unsigned long size)
79044 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
79045 + fndecl = build_fn_decl(check_function, fntype);
79046 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
79047 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
79048 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
79049 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
79050 +}
79051 +
79052 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
79053 +{
79054 + gimple track_stack;
79055 + tree fndecl, fntype;
79056 +
79057 + // insert call to void pax_track_stack(void)
79058 + fntype = build_function_type_list(void_type_node, NULL_TREE);
79059 + fndecl = build_fn_decl(track_function, fntype);
79060 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
79061 + track_stack = gimple_build_call(fndecl, 0);
79062 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
79063 +}
79064 +
79065 +#if BUILDING_GCC_VERSION == 4005
79066 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
79067 +{
79068 + tree fndecl;
79069 +
79070 + if (!is_gimple_call(stmt))
79071 + return false;
79072 + fndecl = gimple_call_fndecl(stmt);
79073 + if (!fndecl)
79074 + return false;
79075 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
79076 + return false;
79077 +// print_node(stderr, "pax", fndecl, 4);
79078 + return DECL_FUNCTION_CODE(fndecl) == code;
79079 +}
79080 +#endif
79081 +
79082 +static bool is_alloca(gimple stmt)
79083 +{
79084 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
79085 + return true;
79086 +
79087 +#if BUILDING_GCC_VERSION >= 4007
79088 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
79089 + return true;
79090 +#endif
79091 +
79092 + return false;
79093 +}
79094 +
79095 +static unsigned int execute_stackleak_tree_instrument(void)
79096 +{
79097 + basic_block bb, entry_bb;
79098 + bool prologue_instrumented = false;
79099 +
79100 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
79101 +
79102 + // 1. loop through BBs and GIMPLE statements
79103 + FOR_EACH_BB(bb) {
79104 + gimple_stmt_iterator gsi;
79105 +
79106 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
79107 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
79108 + if (!is_alloca(gsi_stmt(gsi)))
79109 + continue;
79110 +
79111 + // 2. insert stack overflow check before each __builtin_alloca call
79112 + stackleak_check_alloca(&gsi);
79113 +
79114 + // 3. insert track call after each __builtin_alloca call
79115 + stackleak_add_instrumentation(&gsi);
79116 + if (bb == entry_bb)
79117 + prologue_instrumented = true;
79118 + }
79119 + }
79120 +
79121 + // 4. insert track call at the beginning
79122 + if (!prologue_instrumented) {
79123 + gimple_stmt_iterator gsi;
79124 +
79125 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
79126 + if (dom_info_available_p(CDI_DOMINATORS))
79127 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
79128 + gsi = gsi_start_bb(bb);
79129 + stackleak_add_instrumentation(&gsi);
79130 + }
79131 +
79132 + return 0;
79133 +}
79134 +
79135 +static unsigned int execute_stackleak_final(void)
79136 +{
79137 + rtx insn;
79138 +
79139 + if (cfun->calls_alloca)
79140 + return 0;
79141 +
79142 + // keep calls only if function frame is big enough
79143 + if (get_frame_size() >= track_frame_size)
79144 + return 0;
79145 +
79146 + // 1. find pax_track_stack calls
79147 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
79148 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
79149 + rtx body;
79150 +
79151 + if (!CALL_P(insn))
79152 + continue;
79153 + body = PATTERN(insn);
79154 + if (GET_CODE(body) != CALL)
79155 + continue;
79156 + body = XEXP(body, 0);
79157 + if (GET_CODE(body) != MEM)
79158 + continue;
79159 + body = XEXP(body, 0);
79160 + if (GET_CODE(body) != SYMBOL_REF)
79161 + continue;
79162 + if (strcmp(XSTR(body, 0), track_function))
79163 + continue;
79164 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
79165 + // 2. delete call
79166 + insn = delete_insn_and_edges(insn);
79167 +#if BUILDING_GCC_VERSION >= 4007
79168 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
79169 + insn = delete_insn_and_edges(insn);
79170 +#endif
79171 + }
79172 +
79173 +// print_simple_rtl(stderr, get_insns());
79174 +// print_rtl(stderr, get_insns());
79175 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
79176 +
79177 + return 0;
79178 +}
79179 +
79180 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79181 +{
79182 + const char * const plugin_name = plugin_info->base_name;
79183 + const int argc = plugin_info->argc;
79184 + const struct plugin_argument * const argv = plugin_info->argv;
79185 + int i;
79186 + struct register_pass_info stackleak_tree_instrument_pass_info = {
79187 + .pass = &stackleak_tree_instrument_pass.pass,
79188 +// .reference_pass_name = "tree_profile",
79189 + .reference_pass_name = "optimized",
79190 + .ref_pass_instance_number = 0,
79191 + .pos_op = PASS_POS_INSERT_AFTER
79192 + };
79193 + struct register_pass_info stackleak_final_pass_info = {
79194 + .pass = &stackleak_final_rtl_opt_pass.pass,
79195 + .reference_pass_name = "final",
79196 + .ref_pass_instance_number = 0,
79197 + .pos_op = PASS_POS_INSERT_BEFORE
79198 + };
79199 +
79200 + if (!plugin_default_version_check(version, &gcc_version)) {
79201 + error(G_("incompatible gcc/plugin versions"));
79202 + return 1;
79203 + }
79204 +
79205 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
79206 +
79207 + for (i = 0; i < argc; ++i) {
79208 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
79209 + if (!argv[i].value) {
79210 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79211 + continue;
79212 + }
79213 + track_frame_size = atoi(argv[i].value);
79214 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
79215 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
79216 + continue;
79217 + }
79218 + if (!strcmp(argv[i].key, "initialize-locals")) {
79219 + if (argv[i].value) {
79220 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
79221 + continue;
79222 + }
79223 + init_locals = true;
79224 + continue;
79225 + }
79226 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79227 + }
79228 +
79229 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
79230 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
79231 +
79232 + return 0;
79233 +}
79234 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
79235 index 6789d78..4afd019 100644
79236 --- a/tools/perf/util/include/asm/alternative-asm.h
79237 +++ b/tools/perf/util/include/asm/alternative-asm.h
79238 @@ -5,4 +5,7 @@
79239
79240 #define altinstruction_entry #
79241
79242 + .macro pax_force_retaddr rip=0, reload=0
79243 + .endm
79244 +
79245 #endif
79246 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
79247 index af0f22f..9a7d479 100644
79248 --- a/usr/gen_init_cpio.c
79249 +++ b/usr/gen_init_cpio.c
79250 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
79251 int retval;
79252 int rc = -1;
79253 int namesize;
79254 - int i;
79255 + unsigned int i;
79256
79257 mode |= S_IFREG;
79258
79259 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
79260 *env_var = *expanded = '\0';
79261 strncat(env_var, start + 2, end - start - 2);
79262 strncat(expanded, new_location, start - new_location);
79263 - strncat(expanded, getenv(env_var), PATH_MAX);
79264 - strncat(expanded, end + 1, PATH_MAX);
79265 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
79266 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
79267 strncpy(new_location, expanded, PATH_MAX);
79268 + new_location[PATH_MAX] = 0;
79269 } else
79270 break;
79271 }
79272 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
79273 index d9cfb78..4f27c10 100644
79274 --- a/virt/kvm/kvm_main.c
79275 +++ b/virt/kvm/kvm_main.c
79276 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
79277
79278 static cpumask_var_t cpus_hardware_enabled;
79279 static int kvm_usage_count = 0;
79280 -static atomic_t hardware_enable_failed;
79281 +static atomic_unchecked_t hardware_enable_failed;
79282
79283 struct kmem_cache *kvm_vcpu_cache;
79284 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
79285 @@ -2268,7 +2268,7 @@ static void hardware_enable_nolock(void *junk)
79286
79287 if (r) {
79288 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
79289 - atomic_inc(&hardware_enable_failed);
79290 + atomic_inc_unchecked(&hardware_enable_failed);
79291 printk(KERN_INFO "kvm: enabling virtualization on "
79292 "CPU%d failed\n", cpu);
79293 }
79294 @@ -2322,10 +2322,10 @@ static int hardware_enable_all(void)
79295
79296 kvm_usage_count++;
79297 if (kvm_usage_count == 1) {
79298 - atomic_set(&hardware_enable_failed, 0);
79299 + atomic_set_unchecked(&hardware_enable_failed, 0);
79300 on_each_cpu(hardware_enable_nolock, NULL, 1);
79301
79302 - if (atomic_read(&hardware_enable_failed)) {
79303 + if (atomic_read_unchecked(&hardware_enable_failed)) {
79304 hardware_disable_all_nolock();
79305 r = -EBUSY;
79306 }
79307 @@ -2676,7 +2676,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
79308 kvm_arch_vcpu_put(vcpu);
79309 }
79310
79311 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79312 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79313 struct module *module)
79314 {
79315 int r;
79316 @@ -2739,7 +2739,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79317 if (!vcpu_align)
79318 vcpu_align = __alignof__(struct kvm_vcpu);
79319 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
79320 - 0, NULL);
79321 + SLAB_USERCOPY, NULL);
79322 if (!kvm_vcpu_cache) {
79323 r = -ENOMEM;
79324 goto out_free_3;
79325 @@ -2749,9 +2749,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79326 if (r)
79327 goto out_free;
79328
79329 - kvm_chardev_ops.owner = module;
79330 - kvm_vm_fops.owner = module;
79331 - kvm_vcpu_fops.owner = module;
79332 + pax_open_kernel();
79333 + *(void **)&kvm_chardev_ops.owner = module;
79334 + *(void **)&kvm_vm_fops.owner = module;
79335 + *(void **)&kvm_vcpu_fops.owner = module;
79336 + pax_close_kernel();
79337
79338 r = misc_register(&kvm_dev);
79339 if (r) {